| prompt (large_string, lengths 70–991k) | completion (large_string, lengths 0–1.02k) |
|---|---|
<|file_name|>onlinesignin.js<|end_file_name|><|fim▁begin|>var clientid = '4c3b2c1b-364c-4ceb-9416-8371dd4ebe3a';
if (/^#access_token=/.test(location.hash)) {
location.assign('/Home/index?auto=1&ss=0' +
'&cors=1' +
'&client_id=' + clientid+
'&origins=https://webdir.online.lync.com/autodiscover/autodiscoverservice.svc/root');
}
$('.loginForm').submit(function (event) {
event.preventDefault();
if (location.hash == '') {
location.assign('https://login.windows.net/common/oauth2/authorize?response_type=token' +
'&client_id=' + clientid+
'&redirect_uri=http://healthcarenocc.azurewebsites.net/' +
'&resource=https://webdir.online.lync.com');
<|fim▁hole|> }
});<|fim▁end|> | |
<|file_name|>game.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# Copyright (C) 2007 Sascha Peilicke <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
from random import randrange
from zipfile import ZipFile
from StringIO import StringIO
# Constants
DEFAULT_LEVELPACK = './data/default_pack.zip'
SKILL_EASY = 'Easy' # These values should match
SKILL_MEDIUM = 'Medium' # the level files!
SKILL_HARD = 'Hard'
FIELD_INVALID = 0 # Constants describing a field on
FIELD_VALID = 1 # the playfield
FIELD_MARKED_VALID = 2
FIELD_MARKED_INVALID = 4
FIELD_OPEN = 8
class Game(object):
"""A paint by numbers game also called nonogram.
"""
def __init__(self, skill=None):
"""Creates a picross game.
Parameters:
skill - Desired skill level (None == random)
"""
self.__level = None
self.__name = None
self.__skill = None
self.__fieldsToOpen = 0
self.__fieldsOpened = 0
self.load(skill=skill)
#
# Miscellaneous methods
#
def _debug_print(self):
print self.getInfo()
print 'go: %s' % (self.__gameOver)
for row in self.__level:
print row
#
# Game information retrieval
#
def getInfo(self):
"""Returns the name, skill and size of the level
"""
return self.__name,self.__skill,len(self.__level)
def getRowHint(self,row):
"""Returns the hint for a specific row.
"""
hint,count = [],0
for columnItem in self.__level[row]:
if columnItem == FIELD_VALID:
count += 1
else:
if count > 0:
hint.append(count)
count = 0
if count > 0:
hint.append(count)
if not hint:
hint.append(0)
return hint
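    # Example: a row [1, 1, 0, 0, 1, 1, 1] (FIELD_VALID == 1) yields the hint
    # [2, 3], i.e. the lengths of consecutive runs of valid fields from left
    # to right; a row without any valid field yields [0].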
def getColumnHint(self,col):
"""Returns the hint for a specific column.
"""
hint,count = [],0
for row in self.__level:
if row[col] == FIELD_VALID:
count += 1
else:
if count > 0:
hint.append(count)
count = 0
if count > 0:
hint.append(count)
if not hint:
hint.append(0)
return hint
def getField(self,col,row):
return self.__level[row][col]
def isGameWon(self):
return self.__fieldsOpened == self.__fieldsToOpen
#
# Game manipulation methods
#
def restart(self):
"""Reinitializes the current game
"""
for i, row in enumerate(self.__level):
for j, field in enumerate(row):
if field == FIELD_OPEN or field == FIELD_MARKED_VALID:
self.__level[i][j] = FIELD_VALID
elif field == FIELD_MARKED_INVALID:
self.__level[i][j] = FIELD_INVALID
self.__gameOver = False
self.__fieldsOpened = 0
def openField(self,col,row):
field = self.__level[row][col]
if field == FIELD_VALID or field == FIELD_MARKED_VALID:
self.__level[row][col] = FIELD_OPEN
self.__fieldsOpened += 1
return True
else:
return False
def markField(self,col,row):
field = self.__level[row][col]
if field == FIELD_VALID:
self.__level[row][col] = FIELD_MARKED_VALID<|fim▁hole|> elif field == FIELD_INVALID:
self.__level[row][col] = FIELD_MARKED_INVALID
elif field == FIELD_MARKED_INVALID:
self.__level[row][col] = FIELD_INVALID
return self.__level[row][col]
def load(self,file=DEFAULT_LEVELPACK,skill=None):
"""Loads a level either from a zipped levelpack or from a textfile.
Parameters:
file - Can be a file path or zipped levelpack
skill - Desired level skill (None == random)
"""
if file.endswith('.lvl'):
# Set the skill variable
if file.startswith('easy'): self.__skill = SKILL_EASY
elif file.startswith('medium'): self.__skill = SKILL_MEDIUM
elif file.startswith('hard'): self.__skill = SKILL_HARD
self.__loadFileContent(open(file,'r'))
elif file.endswith('.zip'):
zip = ZipFile(file)
# We have to select from which files in the zipfile we
# want to choose randomly based on the level's skill
candidates = []
if skill == SKILL_EASY:
for file in zip.namelist():
if file.startswith('easy'):
candidates.append(file)
elif skill == SKILL_MEDIUM:
for file in zip.namelist():
if file.startswith('medium'):
candidates.append(file)
elif skill == SKILL_HARD:
for file in zip.namelist():
if file.startswith('hard'):
candidates.append(file)
# This should never happen in a good levelpack, but if it
# is malformed, just pick something!
if not candidates:
candidates = zip.namelist()
# Select one candidate randomly
which = candidates[randrange(len(candidates))]
# Set the skill variable
if which.startswith('easy'): self.__skill = SKILL_EASY
elif which.startswith('medium'):self.__skill = SKILL_MEDIUM
elif which.startswith('hard'): self.__skill = SKILL_HARD
# Read from zipfile and load file content
buf = zip.read(which)
self.__loadFileContent(StringIO(buf))
def __loadFileContent(self,file):
"""Actually loads the level data from a file.
"""
self.__level = []
for line in file:
if line.startswith('name:'):
self.__name = line[5:].strip()
elif line[0] == '0' or line[0] == '1':
row = []
for field in line:
if field == '0':
row.append(FIELD_INVALID)
elif field == '1':
self.__fieldsToOpen += 1
row.append(FIELD_VALID)
self.__level.append(row)<|fim▁end|> | elif field == FIELD_MARKED_VALID:
self.__level[row][col] = FIELD_VALID |
<|file_name|>4_04_system_of_systems.rs<|end_file_name|><|fim▁begin|>// The Nature of Code
// Daniel Shiffman
// http://natureofcode.com
//
// example 4-04: System of Systems
use nannou::prelude::*;
fn main() {
nannou::app(model).update(update).run();
}
struct Model {
systems: Vec<ParticleSystem>,
}
// A simple particle type
struct Particle {
position: Point2<f32>,
velocity: Vector2<f32>,
acceleration: Vector2<f32>,
life_span: f32,
}
impl Particle {
fn new(l: Point2<f32>) -> Self {
let acceleration = vec2(0.0, 0.05);
let velocity = vec2(random_f32() * 2.0 - 1.0, random_f32() - 2.0);
let position = l;
let life_span = 255.0;
Particle {
acceleration,
velocity,
position,
life_span,
}
}
// Method to update position
fn update(&mut self) {
self.velocity += self.acceleration;
self.position -= self.velocity;
self.life_span -= 2.0;
}
// Method to display
fn display(&self, draw: &Draw) {
let size = 12.0;
draw.ellipse()
.xy(self.position)
.w_h(size, size)
.rgba(0.5, 0.5, 0.5, self.life_span / 255.0)
.stroke(rgba(0.0, 0.0, 0.0, self.life_span / 255.0))
.stroke_weight(2.0);
}
// Is the particle still useful?
fn is_dead(&self) -> bool {
if self.life_span < 0.0 {
true
} else {
false
}
}
}
struct ParticleSystem {
particles: Vec<Particle>,
origin: Point2<f32>,
}
impl ParticleSystem {
fn new(num: i32, position: Point2<f32>) -> Self {
let origin = position; // An origin point for where particles are birthed<|fim▁hole|> particles.push(Particle::new(origin)); // Add "num" amount of particles to the vector
}
ParticleSystem { origin, particles }
}
fn add_particle(&mut self) {
self.particles.push(Particle::new(self.origin));
}
fn update(&mut self) {
for i in (0..self.particles.len()).rev() {
self.particles[i].update();
if self.particles[i].is_dead() {
self.particles.remove(i);
}
}
}
fn draw(&self, draw: &Draw) {
for p in self.particles.iter() {
p.display(&draw);
}
}
// A method to test if the particle system still has particles
fn _dead(&self) -> bool {
if self.particles.is_empty() {
true
} else {
false
}
}
}
fn model(app: &App) -> Model {
app.new_window()
.size(640, 360)
.mouse_pressed(mouse_pressed)
.view(view)
.build()
.unwrap();
let systems = Vec::new();
Model { systems }
}
fn mouse_pressed(app: &App, m: &mut Model, _button: MouseButton) {
m.systems
.push(ParticleSystem::new(1, pt2(app.mouse.x, app.mouse.y)));
}
fn update(_app: &App, m: &mut Model, _update: Update) {
for ps in m.systems.iter_mut() {
ps.add_particle();
ps.update();
}
}
fn view(app: &App, m: &Model, frame: Frame) {
// Begin drawing
let draw = app.draw();
draw.background().color(WHITE);
for i in 0..m.systems.len() {
m.systems[i].draw(&draw);
}
// Write the result of our drawing to the window's frame.
draw.to_frame(app, &frame).unwrap();
}<|fim▁end|> | let mut particles = Vec::new(); // Initialise the Vector
for _i in 0..num { |
<|file_name|>diff_index_scanner.go<|end_file_name|><|fim▁begin|>package lfs
import (
"bufio"
"fmt"
"strconv"
"strings"
"github.com/git-lfs/git-lfs/errors"
"github.com/git-lfs/git-lfs/git"
)
// Status represents the status of a file that appears in the output of `git
// diff-index`.
//
// More information about each of its valid instances can be found:<|fim▁hole|>type DiffIndexStatus rune
const (
StatusAddition DiffIndexStatus = 'A'
StatusCopy DiffIndexStatus = 'C'
StatusDeletion DiffIndexStatus = 'D'
StatusModification DiffIndexStatus = 'M'
StatusRename DiffIndexStatus = 'R'
StatusTypeChange DiffIndexStatus = 'T'
StatusUnmerged DiffIndexStatus = 'U'
StatusUnknown DiffIndexStatus = 'X'
)
// String implements fmt.Stringer by returning a human-readable name for each
// status.
func (s DiffIndexStatus) String() string {
switch s {
case StatusAddition:
return "addition"
case StatusCopy:
return "copy"
case StatusDeletion:
return "deletion"
case StatusModification:
return "modification"
case StatusRename:
return "rename"
case StatusTypeChange:
return "change"
case StatusUnmerged:
return "unmerged"
case StatusUnknown:
return "unknown"
}
return "<unknown>"
}
// Format implements fmt.Formatter. If printed as "%+d", "%+s", or "%+v", the
// status will be written out as an English word: i.e., "addition", "copy",
// "deletion", etc.
//
// If the '+' flag is not given, the shorthand will be used instead: 'A', 'C',
// and 'D', respectively.
//
// If any other format verb is given, this function will panic().
func (s DiffIndexStatus) Format(state fmt.State, c rune) {
switch c {
case 'd', 's', 'v':
if state.Flag('+') {
state.Write([]byte(s.String()))
} else {
state.Write([]byte{byte(rune(s))})
}
default:
panic(fmt.Sprintf("cannot format %v for DiffIndexStatus", c))
}
}
// DiffIndexEntry holds information about a single item in the results of a `git
// diff-index` command.
type DiffIndexEntry struct {
// SrcMode is the file mode of the "src" file, stored as a string-based
// octal.
SrcMode string
// DstMode is the file mode of the "dst" file, stored as a string-based
// octal.
DstMode string
// SrcSha is the Git blob ID of the "src" file.
SrcSha string
// DstSha is the Git blob ID of the "dst" file.
DstSha string
// Status is the status of the file in the index.
Status DiffIndexStatus
// StatusScore is the optional "score" associated with a particular
// status.
StatusScore int
// SrcName is the name of the file in its "src" state as it appears in
// the index.
SrcName string
// DstName is the name of the file in its "dst" state as it appears in
// the index.
DstName string
}
// DiffIndexScanner scans the output of the `git diff-index` command.
type DiffIndexScanner struct {
// next is the next entry scanned by the Scanner.
next *DiffIndexEntry
// err is any error that the Scanner encountered while scanning.
err error
// from is the underlying scanner, scanning the `git diff-index`
// command's stdout.
from *bufio.Scanner
}
// NewDiffIndexScanner initializes a new `DiffIndexScanner` scanning at the
// given ref, "ref".
//
// If "cache" is given, the DiffIndexScanner will scan for differences between
// the given ref and the index. If "cache" is _not_ given, DiffIndexScanner will
// scan for differences between the given ref and the currently checked out
// tree.
//
// If "refresh" is given, the DiffIndexScanner will refresh the index. This is
// probably what you want in all cases except fsck, where invoking a filtering
// operation would be undesirable due to the possibility of corruption. It can
// also be disabled where another operation will have refreshed the index.
//
// If any error was encountered in starting the command or closing its `stdin`,
// that error will be returned immediately. Otherwise, a `*DiffIndexScanner`
// will be returned with a `nil` error.
func NewDiffIndexScanner(ref string, cached bool, refresh bool) (*DiffIndexScanner, error) {
scanner, err := git.DiffIndex(ref, cached, refresh)
if err != nil {
return nil, err
}
return &DiffIndexScanner{
from: scanner,
}, nil
}
// Scan advances the scan line and yields either a new value for Entry(), or an
// Err(). It returns true or false, whether or not it can continue scanning for
// more entries.
func (s *DiffIndexScanner) Scan() bool {
if !s.prepareScan() {
return false
}
s.next, s.err = s.scan(s.from.Text())
if s.err != nil {
s.err = errors.Wrap(s.err, "scan")
}
return s.err == nil
}
// Entry returns the last entry that was Scan()'d by the DiffIndexScanner.
func (s *DiffIndexScanner) Entry() *DiffIndexEntry { return s.next }
// Err returns the last error that was encountered by the DiffIndexScanner.
func (s *DiffIndexScanner) Err() error { return s.err }
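// A minimal sketch of how the scanner is typically driven (hypothetical caller
// code; nothing beyond the Scan/Entry/Err API defined above is assumed):
//
//	scanner, err := NewDiffIndexScanner("HEAD", true, false)
//	if err != nil {
//		return err
//	}
//	for scanner.Scan() {
//		entry := scanner.Entry()
//		fmt.Printf("%+s %s\n", entry.Status, entry.SrcName)
//	}
//	if err := scanner.Err(); err != nil {
//		return err
//	}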
// prepareScan clears out the results from the last Scan() loop, and advances
// the internal scanner to fetch a new line of Text().
func (s *DiffIndexScanner) prepareScan() bool {
s.next, s.err = nil, nil
if !s.from.Scan() {
s.err = s.from.Err()
return false
}
return true
}
// scan parses the given line and returns a `*DiffIndexEntry` or an error,
// depending on whether or not the parse was successful.
func (s *DiffIndexScanner) scan(line string) (*DiffIndexEntry, error) {
// Format is:
// :100644 100644 c5b3d83a7542255ec7856487baa5e83d65b1624c 9e82ac1b514be060945392291b5b3108c22f6fe3 M foo.gif
// :<old mode> <new mode> <old sha1> <new sha1> <status>\t<file name>[\t<file name>]
parts := strings.Split(line, "\t")
if len(parts) < 2 {
return nil, errors.Errorf("invalid line: %s", line)
}
desc := strings.Fields(parts[0])
if len(desc) < 5 {
return nil, errors.Errorf("invalid description: %s", parts[0])
}
entry := &DiffIndexEntry{
SrcMode: strings.TrimPrefix(desc[0], ":"),
DstMode: desc[1],
SrcSha: desc[2],
DstSha: desc[3],
Status: DiffIndexStatus(rune(desc[4][0])),
SrcName: parts[1],
}
	if score, err := strconv.Atoi(desc[4][1:]); err == nil {
entry.StatusScore = score
}
if len(parts) > 2 {
entry.DstName = parts[2]
}
return entry, nil
}<|fim▁end|> | // https://git-scm.com/docs/git-diff-index |
<|file_name|>RegionPass.cpp<|end_file_name|><|fim▁begin|>//===- RegionPass.cpp - Region Pass and Region Pass Manager ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements RegionPass and RGPassManager. All region optimization
// and transformation passes are derived from RegionPass. RGPassManager is
// responsible for managing RegionPasses.
// Most of this code has been COPIED from LoopPass.cpp
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/RegionPass.h"
#include "llvm/Analysis/RegionIterator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "regionpassmgr"
//===----------------------------------------------------------------------===//
// RGPassManager
//
char RGPassManager::ID = 0;
RGPassManager::RGPassManager()
: FunctionPass(ID), PMDataManager() {
skipThisRegion = false;
redoThisRegion = false;
RI = nullptr;
CurrentRegion = nullptr;
}
// Recurse through all subregions and all regions into RQ.
static void addRegionIntoQueue(Region &R, std::deque<Region *> &RQ) {
RQ.push_back(&R);
for (const auto &E : R)
addRegionIntoQueue(*E, RQ);
}
/// Pass Manager itself does not invalidate any analysis info.
void RGPassManager::getAnalysisUsage(AnalysisUsage &Info) const {
Info.addRequired<RegionInfoPass>();
Info.setPreservesAll();
}
/// run - Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the function, and if so, return true.
bool RGPassManager::runOnFunction(Function &F) {
RI = &getAnalysis<RegionInfoPass>().getRegionInfo();
bool Changed = false;
// Collect inherited analysis from Module level pass manager.
populateInheritedAnalysis(TPM->activeStack);
addRegionIntoQueue(*RI->getTopLevelRegion(), RQ);
if (RQ.empty()) // No regions, skip calling finalizers
return false;
// Initialization
for (Region *R : RQ) {
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
RegionPass *RP = (RegionPass *)getContainedPass(Index);
Changed |= RP->doInitialization(R, *this);
}
}
// Walk Regions
while (!RQ.empty()) {
CurrentRegion = RQ.back();
skipThisRegion = false;
redoThisRegion = false;
// Run all passes on the current Region.
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
RegionPass *P = (RegionPass*)getContainedPass(Index);
if (isPassDebuggingExecutionsOrMore()) {
dumpPassInfo(P, EXECUTION_MSG, ON_REGION_MSG,
CurrentRegion->getNameStr());
dumpRequiredSet(P);
}
initializeAnalysisImpl(P);
{
PassManagerPrettyStackEntry X(P, *CurrentRegion->getEntry());
TimeRegion PassTimer(getPassTimer(P));
Changed |= P->runOnRegion(CurrentRegion, *this);
}
if (isPassDebuggingExecutionsOrMore()) {
if (Changed)
dumpPassInfo(P, MODIFICATION_MSG, ON_REGION_MSG,
skipThisRegion ? "<deleted>" :
CurrentRegion->getNameStr());
dumpPreservedSet(P);
}
if (!skipThisRegion) {
// Manually check that this region is still healthy. This is done
// instead of relying on RegionInfo::verifyRegion since RegionInfo
// is a function pass and it's really expensive to verify every
// Region in the function every time. That level of checking can be
// enabled with the -verify-region-info option.
{
TimeRegion PassTimer(getPassTimer(P));
CurrentRegion->verifyRegion();
}
// Then call the regular verifyAnalysis functions.
verifyPreservedAnalysis(P);
}
removeNotPreservedAnalysis(P);
recordAvailableAnalysis(P);
removeDeadPasses(P,
(!isPassDebuggingExecutionsOrMore() || skipThisRegion) ?
"<deleted>" : CurrentRegion->getNameStr(),
ON_REGION_MSG);
if (skipThisRegion)
// Do not run other passes on this region.
break;
}
// If the region was deleted, release all the region passes. This frees up
// some memory, and avoids trouble with the pass manager trying to call
// verifyAnalysis on them.
if (skipThisRegion)
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
Pass *P = getContainedPass(Index);
freePass(P, "<deleted>", ON_REGION_MSG);
}
// Pop the region from queue after running all passes.
RQ.pop_back();
if (redoThisRegion)
RQ.push_back(CurrentRegion);
// Free all region nodes created in region passes.
RI->clearNodeCache();
}
// Finalization
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
RegionPass *P = (RegionPass*)getContainedPass(Index);
Changed |= P->doFinalization();
}
// Print the region tree after all pass.
DEBUG(
dbgs() << "\nRegion tree of function " << F.getName()
<< " after all region Pass:\n";
RI->dump();
dbgs() << "\n";
);
return Changed;
}
/// Print passes managed by this manager
void RGPassManager::dumpPassStructure(unsigned Offset) {
errs().indent(Offset*2) << "Region Pass Manager\n";
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
Pass *P = getContainedPass(Index);
P->dumpPassStructure(Offset + 1);
dumpLastUses(P, Offset+1);
}
}
namespace {<|fim▁hole|>//===----------------------------------------------------------------------===//
// PrintRegionPass
class PrintRegionPass : public RegionPass {
private:
std::string Banner;
raw_ostream &Out; // raw_ostream to print on.
public:
static char ID;
PrintRegionPass(const std::string &B, raw_ostream &o)
: RegionPass(ID), Banner(B), Out(o) {}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
bool runOnRegion(Region *R, RGPassManager &RGM) override {
Out << Banner;
for (const auto *BB : R->blocks()) {
if (BB)
BB->print(Out);
else
Out << "Printing <null> Block";
}
return false;
}
};
char PrintRegionPass::ID = 0;
} //end anonymous namespace
//===----------------------------------------------------------------------===//
// RegionPass
// Check if this pass is suitable for the current RGPassManager, if
// available. This pass P is not suitable for a RGPassManager if P
// is not preserving higher level analysis info used by other
// RGPassManager passes. In such case, pop RGPassManager from the
// stack. This will force assignPassManager() to create new
// LPPassManger as expected.
void RegionPass::preparePassManager(PMStack &PMS) {
// Find RGPassManager
while (!PMS.empty() &&
PMS.top()->getPassManagerType() > PMT_RegionPassManager)
PMS.pop();
// If this pass is destroying high level information that is used
// by other passes that are managed by LPM then do not insert
// this pass in current LPM. Use new RGPassManager.
if (PMS.top()->getPassManagerType() == PMT_RegionPassManager &&
!PMS.top()->preserveHigherLevelAnalysis(this))
PMS.pop();
}
/// Assign pass manager to manage this pass.
void RegionPass::assignPassManager(PMStack &PMS,
PassManagerType PreferredType) {
// Find RGPassManager
while (!PMS.empty() &&
PMS.top()->getPassManagerType() > PMT_RegionPassManager)
PMS.pop();
RGPassManager *RGPM;
// Create new Region Pass Manager if it does not exist.
if (PMS.top()->getPassManagerType() == PMT_RegionPassManager)
RGPM = (RGPassManager*)PMS.top();
else {
assert (!PMS.empty() && "Unable to create Region Pass Manager");
PMDataManager *PMD = PMS.top();
// [1] Create new Region Pass Manager
RGPM = new RGPassManager();
RGPM->populateInheritedAnalysis(PMS);
// [2] Set up new manager's top level manager
PMTopLevelManager *TPM = PMD->getTopLevelManager();
TPM->addIndirectPassManager(RGPM);
// [3] Assign manager to manage this new manager. This may create
// and push new managers into PMS
TPM->schedulePass(RGPM);
// [4] Push new manager into PMS
PMS.push(RGPM);
}
RGPM->add(this);
}
/// Get the printer pass
Pass *RegionPass::createPrinterPass(raw_ostream &O,
const std::string &Banner) const {
return new PrintRegionPass(Banner, O);
}<|fim▁end|> | |
<|file_name|>qubole_operator.py<|end_file_name|><|fim▁begin|>#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,<|fim▁hole|># specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`airflow.providers.qubole.operators.qubole`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.qubole.operators.qubole import QuboleOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.qubole.operators.qubole`.",
DeprecationWarning,
stacklevel=2,
)<|fim▁end|> | # software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import url<|fim▁hole|>from . import views
urlpatterns = [
# Captures special abbreviations and redirects to UOS websites
url(r"^(?P<site>bb|udc|ms|uos)/?$", views.redirect_to_uos),
# All website related requests link to views.layout
# as the layout loads other dependencies as per request
url(r"", views.layout),
]<|fim▁end|> | |
<|file_name|>mnist.py<|end_file_name|><|fim▁begin|>__author__ = 'igor'
"""
Build the MNIST network.
The graph is built in three steps:
1.inference() - Builds the model as far as is required for running the network
forward to make predictions.
2.loss() -Adds to the inference model the layers required to generate loss
3.training() - Adds to the loss model the Ops required to generate and
apply gradients.
"""
import os.path
import math
import tensorflow.python.platform
import tensorflow as tf
# The MNIST dataset has 10 classes
NUM_CLASSES = 10
# MNIST images are 28×28 pixels
IMAGE_SIZE = 28
# Dimensionality of the input features
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
def inference(images, hidden1_units, hidden2_units):
'''
    Build the MNIST model (forward pass).
    :param images: Images placeholder (input).
    :param hidden1_units: Size of the first hidden layer.
    :param hidden2_units: Size of the second hidden layer.
:return:
softmax_linear:Output tensor with the computed logits.
'''
# Hidden 1
with tf.name_scope("hidden1"):
        weights = tf.Variable(  # weights from the input layer to the first hidden layer
tf.truncated_normal([IMAGE_PIXELS, hidden1_units],
stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
name="weights")
biases = tf.Variable(
tf.zeros([hidden1_units]),
name='biases'
)
        hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)  # ReLU (rectifier) activation
# Hidden 2
with tf.name_scope('hidden2'):
weights = tf.Variable(
tf.truncated_normal([hidden1_units, hidden2_units],
stddev=1.0 / math.sqrt(float(hidden1_units))),
name='weights')
biases = tf.Variable(tf.zeros([hidden2_units]),
name='biases')
hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
# Linear
with tf.name_scope('soft_max_linear'):
weights = tf.Variable(
tf.truncated_normal([hidden2_units, NUM_CLASSES],
stddev=1.0 / math.sqrt(float(hidden2_units))),
name='weights')
biases = tf.Variable(tf.zeros([NUM_CLASSES]),
name='biases')
        logits = tf.matmul(hidden2, weights) + biases  # linear (identity) output layer
return logits
def loss(logits, labels):
'''
    Compute the loss from the logits and the labels.
    :param logits: Logits tensor, float - [batch_size, NUM_CLASSES]
:param labels: Labels tensor,int32-[batch_size]
:return:Loss tensor
'''
    # Encode labels_placeholder with one-hot encoding
batch_size = tf.size(labels)
labels = tf.expand_dims(labels, 1)
indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)<|fim▁hole|> cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits,
one_hot_labels,
name='xentropy')
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
return loss
def training(loss, learning_rate):
'''
    Set up the training Ops.
:param loss:
:param learning_rate:
:return:
'''
tf.scalar_summary(loss.op.name, loss)
    # Gradient descent optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
global_step = tf.Variable(0, name='global_step', trainable=False)
train_op = optimizer.minimize(loss, global_step=global_step)
return train_op
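# A minimal sketch of how inference(), loss() and training() compose into a
# single training graph (hypothetical caller code for this TF 0.x-era API; the
# placeholder names and layer sizes below are illustrative only):
#
#   images_pl = tf.placeholder(tf.float32, shape=(batch_size, IMAGE_PIXELS))
#   labels_pl = tf.placeholder(tf.int32, shape=(batch_size,))
#   logits = inference(images_pl, 128, 32)
#   loss_op = loss(logits, labels_pl)
#   train_op = training(loss_op, 0.01)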
def evalution(logits, labels):
correct = tf.nn.in_top_k(logits, labels, 1)
return tf.reduce_sum(tf.cast(correct, tf.int32))
if __name__ == '__main__':
pass<|fim▁end|> | concated = tf.concat(1, [indices, labels])
one_hot_labels = tf.sparse_to_dense(
concated, tf.pack([batch_size, NUM_CLASSES]), 1.0, 0.0) |
<|file_name|>test_dauth.py<|end_file_name|><|fim▁begin|>from nintendo import dauth, switch
from anynet import http
import pytest
<|fim▁hole|> "Host: 127.0.0.1:12345\r\n" \
"User-Agent: libcurl (nnDauth; 16f4553f-9eee-4e39-9b61-59bc7c99b7c8; SDK 12.3.0.0)\r\n" \
"Accept: */*\r\n" \
"X-Nintendo-PowerState: FA\r\n" \
"Content-Length: 17\r\n" \
"Content-Type: application/x-www-form-urlencoded\r\n\r\n" \
"key_generation=11"
TOKEN_REQUEST = \
"POST /v6/device_auth_token HTTP/1.1\r\n" \
"Host: 127.0.0.1:12345\r\n" \
"User-Agent: libcurl (nnDauth; 16f4553f-9eee-4e39-9b61-59bc7c99b7c8; SDK 12.3.0.0)\r\n" \
"Accept: */*\r\n" \
"X-Nintendo-PowerState: FA\r\n" \
"Content-Length: 211\r\n" \
"Content-Type: application/x-www-form-urlencoded\r\n\r\n" \
"challenge=vaNgVZZH7gUse0y3t8Cksuln-TAVtvBmcD-ow59qp0E=&" \
"client_id=8f849b5d34778d8e&ist=false&key_generation=11&" \
"system_version=CusHY#000c0000#C-BynYNPXdQJNBZjx02Hizi8lRUSIKLwPGa5p8EY1uo=&" \
"mac=xRB_6mgnNqrnF9DRsEpYMg"
@pytest.mark.anyio
async def test_dauth():
async def handler(client, request):
if request.path == "/v6/challenge":
assert request.encode().decode() == CHALLENGE_REQUEST
response = http.HTTPResponse(200)
response.json = {
"challenge": "vaNgVZZH7gUse0y3t8Cksuln-TAVtvBmcD-ow59qp0E=",
"data": "dlL7ZBNSLmYo1hUlKYZiUA=="
}
return response
else:
assert request.encode().decode() == TOKEN_REQUEST
response = http.HTTPResponse(200)
response.json = {
"device_auth_token": "device token"
}
return response
async with http.serve(handler, "127.0.0.1", 12345):
keys = switch.KeySet()
keys["aes_kek_generation_source"] = bytes.fromhex("485d45ad27c07c7e538c0183f90ee845")
keys["master_key_0a"] = bytes.fromhex("37eed242e0f2ce6f8371e783c1a6a0ae")
client = dauth.DAuthClient(keys)
client.set_url("127.0.0.1:12345")
client.set_system_version(1200)
client.set_context(None)
response = await client.device_token(client.BAAS)
token = response["device_auth_token"]
assert token == "device token"<|fim▁end|> | CHALLENGE_REQUEST = \
"POST /v6/challenge HTTP/1.1\r\n" \
|
<|file_name|>jqueryui_integration.component.ts<|end_file_name|><|fim▁begin|>/**
* Created by jebaprince on 1/23/2017.
*/
import {Component, AfterViewInit, ViewChild, ElementRef} from '@angular/core';
import {Inject} from '@angular/core';
declare var $:any;
@Component({
templateUrl: 'resources/app/app_template/jqueryui_integration/jqueryui_integration.html'
})
export class JqueryUIIntegrationComponent implements AfterViewInit {
@ViewChild('jqueryElement') el:ElementRef;
constructor() {
}
ngAfterViewInit() {
console.log(this);
$(this.el.nativeElement).draggable();<|fim▁hole|>}<|fim▁end|> | } |
<|file_name|>ty.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
A mini version of ast::Ty, which is easier to use, and features an
explicit `Self` type to use when specifying impls to be derived.
*/
pub use self::PtrTy::*;
pub use self::Ty::*;
use ast;
use ast::{Expr,Generics,Ident};
use ext::base::ExtCtxt;
use ext::build::AstBuilder;
use codemap::{Span,respan};
use owned_slice::OwnedSlice;
use parse::token::special_idents;
use ptr::P;
/// The types of pointers
#[deriving(Clone)]
pub enum PtrTy<'a> {
/// &'lifetime mut
Borrowed(Option<&'a str>, ast::Mutability),
/// *mut
Raw(ast::Mutability),
}
/// A path, e.g. `::std::option::Option::<int>` (global). Has support
/// for type parameters and a lifetime.
#[deriving(Clone)]
pub struct Path<'a> {
pub path: Vec<&'a str> ,
pub lifetime: Option<&'a str>,
pub params: Vec<Box<Ty<'a>>>,
pub global: bool,
}
impl<'a> Path<'a> {
pub fn new<'r>(path: Vec<&'r str> ) -> Path<'r> {
Path::new_(path, None, Vec::new(), true)
}
pub fn new_local<'r>(path: &'r str) -> Path<'r> {
Path::new_(vec!( path ), None, Vec::new(), false)
}
pub fn new_<'r>(path: Vec<&'r str> ,
lifetime: Option<&'r str>,
params: Vec<Box<Ty<'r>>>,
global: bool)
-> Path<'r> {
Path {
path: path,
lifetime: lifetime,
params: params,
global: global
}
}
pub fn to_ty(&self,
cx: &ExtCtxt,
span: Span,
self_ty: Ident,
self_generics: &Generics)
-> P<ast::Ty> {
cx.ty_path(self.to_path(cx, span, self_ty, self_generics), None)
}
pub fn to_path(&self,
cx: &ExtCtxt,<|fim▁hole|> self_generics: &Generics)
-> ast::Path {
let idents = self.path.iter().map(|s| cx.ident_of(*s)).collect();
let lt = mk_lifetimes(cx, span, &self.lifetime);
let tys = self.params.iter().map(|t| t.to_ty(cx, span, self_ty, self_generics)).collect();
cx.path_all(span, self.global, idents, lt, tys)
}
}
/// A type. Supports pointers, Self, and literals
#[deriving(Clone)]
pub enum Ty<'a> {
Self,
/// &/Box/ Ty
Ptr(Box<Ty<'a>>, PtrTy<'a>),
/// mod::mod::Type<[lifetime], [Params...]>, including a plain type
/// parameter, and things like `int`
Literal(Path<'a>),
/// includes unit
Tuple(Vec<Ty<'a>> )
}
pub fn borrowed_ptrty<'r>() -> PtrTy<'r> {
Borrowed(None, ast::MutImmutable)
}
pub fn borrowed<'r>(ty: Box<Ty<'r>>) -> Ty<'r> {
Ptr(ty, borrowed_ptrty())
}
pub fn borrowed_explicit_self<'r>() -> Option<Option<PtrTy<'r>>> {
Some(Some(borrowed_ptrty()))
}
pub fn borrowed_self<'r>() -> Ty<'r> {
borrowed(box Self)
}
pub fn nil_ty<'r>() -> Ty<'r> {
Tuple(Vec::new())
}
fn mk_lifetime(cx: &ExtCtxt, span: Span, lt: &Option<&str>) -> Option<ast::Lifetime> {
match *lt {
Some(ref s) => Some(cx.lifetime(span, cx.ident_of(*s).name)),
None => None
}
}
fn mk_lifetimes(cx: &ExtCtxt, span: Span, lt: &Option<&str>) -> Vec<ast::Lifetime> {
match *lt {
Some(ref s) => vec!(cx.lifetime(span, cx.ident_of(*s).name)),
None => vec!()
}
}
impl<'a> Ty<'a> {
pub fn to_ty(&self,
cx: &ExtCtxt,
span: Span,
self_ty: Ident,
self_generics: &Generics)
-> P<ast::Ty> {
match *self {
Ptr(ref ty, ref ptr) => {
let raw_ty = ty.to_ty(cx, span, self_ty, self_generics);
match *ptr {
Borrowed(ref lt, mutbl) => {
let lt = mk_lifetime(cx, span, lt);
cx.ty_rptr(span, raw_ty, lt, mutbl)
}
Raw(mutbl) => cx.ty_ptr(span, raw_ty, mutbl)
}
}
Literal(ref p) => { p.to_ty(cx, span, self_ty, self_generics) }
Self => {
cx.ty_path(self.to_path(cx, span, self_ty, self_generics), None)
}
Tuple(ref fields) => {
let ty = ast::TyTup(fields.iter()
.map(|f| f.to_ty(cx, span, self_ty, self_generics))
.collect());
cx.ty(span, ty)
}
}
}
pub fn to_path(&self,
cx: &ExtCtxt,
span: Span,
self_ty: Ident,
self_generics: &Generics)
-> ast::Path {
match *self {
Self => {
let self_params = self_generics.ty_params.map(|ty_param| {
cx.ty_ident(span, ty_param.ident)
});
let lifetimes = self_generics.lifetimes.iter()
.map(|d| d.lifetime)
.collect();
cx.path_all(span, false, vec!(self_ty), lifetimes,
self_params.into_vec())
}
Literal(ref p) => {
p.to_path(cx, span, self_ty, self_generics)
}
Ptr(..) => { cx.span_bug(span, "pointer in a path in generic `deriving`") }
Tuple(..) => { cx.span_bug(span, "tuple in a path in generic `deriving`") }
}
}
}
fn mk_ty_param(cx: &ExtCtxt, span: Span, name: &str,
bounds: &[Path], unbound: Option<ast::TraitRef>,
self_ident: Ident, self_generics: &Generics) -> ast::TyParam {
let bounds =
bounds.iter().map(|b| {
let path = b.to_path(cx, span, self_ident, self_generics);
cx.typarambound(path)
}).collect();
cx.typaram(span, cx.ident_of(name), bounds, unbound, None)
}
fn mk_generics(lifetimes: Vec<ast::LifetimeDef>, ty_params: Vec<ast::TyParam>)
-> Generics {
Generics {
lifetimes: lifetimes,
ty_params: OwnedSlice::from_vec(ty_params),
where_clause: ast::WhereClause {
id: ast::DUMMY_NODE_ID,
predicates: Vec::new(),
},
}
}
/// Lifetimes and bounds on type parameters
#[deriving(Clone)]
pub struct LifetimeBounds<'a> {
pub lifetimes: Vec<(&'a str, Vec<&'a str>)>,
pub bounds: Vec<(&'a str, Option<ast::TraitRef>, Vec<Path<'a>>)>,
}
impl<'a> LifetimeBounds<'a> {
pub fn empty() -> LifetimeBounds<'a> {
LifetimeBounds {
lifetimes: Vec::new(), bounds: Vec::new()
}
}
pub fn to_generics(&self,
cx: &ExtCtxt,
span: Span,
self_ty: Ident,
self_generics: &Generics)
-> Generics {
let lifetimes = self.lifetimes.iter().map(|&(ref lt, ref bounds)| {
let bounds =
bounds.iter().map(
|b| cx.lifetime(span, cx.ident_of(*b).name)).collect();
cx.lifetime_def(span, cx.ident_of(*lt).name, bounds)
}).collect();
let ty_params = self.bounds.iter().map(|t| {
match t {
&(ref name, ref unbound, ref bounds) => {
mk_ty_param(cx,
span,
*name,
bounds.as_slice(),
unbound.clone(),
self_ty,
self_generics)
}
}
}).collect();
mk_generics(lifetimes, ty_params)
}
}
pub fn get_explicit_self(cx: &ExtCtxt, span: Span, self_ptr: &Option<PtrTy>)
-> (P<Expr>, ast::ExplicitSelf) {
// this constructs a fresh `self` path, which will match the fresh `self` binding
// created below.
let self_path = cx.expr_self(span);
match *self_ptr {
None => {
(self_path, respan(span, ast::SelfValue(special_idents::self_)))
}
Some(ref ptr) => {
let self_ty = respan(
span,
match *ptr {
Borrowed(ref lt, mutbl) => {
let lt = lt.map(|s| cx.lifetime(span, cx.ident_of(s).name));
ast::SelfRegion(lt, mutbl, special_idents::self_)
}
Raw(_) => cx.span_bug(span, "attempted to use *self in deriving definition")
});
let self_expr = cx.expr_deref(span, self_path);
(self_expr, self_ty)
}
}
}<|fim▁end|> | span: Span,
self_ty: Ident, |
<|file_name|>alg.py<|end_file_name|><|fim▁begin|>import astra
def gpu_fp(pg, vg, v):
v_id = astra.data2d.create('-vol', vg, v)
rt_id = astra.data2d.create('-sino', pg)
fp_cfg = astra.astra_dict('FP_CUDA')
fp_cfg['VolumeDataId'] = v_id
fp_cfg['ProjectionDataId'] = rt_id
fp_id = astra.algorithm.create(fp_cfg)
astra.algorithm.run(fp_id)
out = astra.data2d.get(rt_id)
astra.algorithm.delete(fp_id)
astra.data2d.delete(rt_id)
astra.data2d.delete(v_id)
return out
def gpu_bp(pg, vg, rt, supersampling=1):
v_id = astra.data2d.create('-vol', vg)
rt_id = astra.data2d.create('-sino', pg, data=rt)
bp_cfg = astra.astra_dict('BP_CUDA')
bp_cfg['ReconstructionDataId'] = v_id
bp_cfg['ProjectionDataId'] = rt_id
bp_id = astra.algorithm.create(bp_cfg)
astra.algorithm.run(bp_id)
out = astra.data2d.get(v_id)
astra.algorithm.delete(bp_id)
astra.data2d.delete(rt_id)
astra.data2d.delete(v_id)
return out
def gpu_fbp(pg, vg, rt):
rt_id = astra.data2d.create('-sino', pg, data=rt)
v_id = astra.data2d.create('-vol', vg)
fbp_cfg = astra.astra_dict('FBP_CUDA')
fbp_cfg['ReconstructionDataId'] = v_id
fbp_cfg['ProjectionDataId'] = rt_id
#fbp_cfg['FilterType'] = 'none'
fbp_id = astra.algorithm.create(fbp_cfg)
astra.algorithm.run(fbp_id, 100)
out = astra.data2d.get(v_id)
astra.algorithm.delete(fbp_id)
astra.data2d.delete(rt_id)
astra.data2d.delete(v_id)
return out
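# A minimal usage sketch for the wrappers above (hypothetical caller code; it
# assumes the standard astra geometry helpers create_vol_geom/create_proj_geom):
#
#   import numpy as np
#   vg = astra.create_vol_geom(256, 256)
#   pg = astra.create_proj_geom('parallel', 1.0, 384,
#                               np.linspace(0, np.pi, 180, endpoint=False))
#   sino = gpu_fp(pg, vg, image)  # forward-project a 256x256 image
#   rec = gpu_fbp(pg, vg, sino)   # filtered back-projection reconstruction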
def gpu_sirt(pg, vg, rt, n_iters=100):
rt_id = astra.data2d.create('-sino', pg, data=rt)
v_id = astra.data2d.create('-vol', vg)
sirt_cfg = astra.astra_dict('SIRT_CUDA')
sirt_cfg['ReconstructionDataId'] = v_id
sirt_cfg['ProjectionDataId'] = rt_id
#sirt_cfg['option'] = {}
#sirt_cfg['option']['MinConstraint'] = 0
sirt_id = astra.algorithm.create(sirt_cfg)
astra.algorithm.run(sirt_id, n_iters)
out = astra.data2d.get(v_id)
astra.algorithm.delete(sirt_id)<|fim▁hole|> astra.data2d.delete(rt_id)
astra.data2d.delete(v_id)
return out<|fim▁end|> | |
<|file_name|>dir_code.cpp<|end_file_name|><|fim▁begin|>#include <iostream>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <errno.h>
using namespace std;
int main(int argc, char** argv)
{
if(argc <= 1)
{
cout << "Nothing passed in to argv." << endl;
exit(1);
}
else
{
DIR *dirp;
if(NULL == (dirp = opendir(argv[1])))
{
perror("There was an error with opendir(). ");
exit(1);
}
struct dirent *filespecs;
errno = 0;
while(NULL != (filespecs = readdir(dirp)))
{
cout << filespecs->d_name << " ";
}
if(errno != 0)
{
perror("There was an error with readdir(). ");
exit(1);
}
cout << endl;
if(-1 == closedir(dirp))
{
perror("There was an error with closedir(). ");
exit(1);
}<|fim▁hole|> }
return 0;
}<|fim▁end|> | |
<|file_name|>ManaUsageChartComponent.js<|end_file_name|><|fim▁begin|>import React from 'react';
import PropTypes from 'prop-types';
import ManaUsageGraph from './ManaUsageGraph';
class HealingDoneGraph extends React.PureComponent {
static propTypes = {
start: PropTypes.number.isRequired,
end: PropTypes.number.isRequired,
offset: PropTypes.number.isRequired,
healingBySecond: PropTypes.object.isRequired,
manaUpdates: PropTypes.array.isRequired,
};
groupHealingBySeconds(healingBySecond, interval) {
return Object.keys(healingBySecond)
.reduce((obj, second) => {
const healing = healingBySecond[second];
const index = Math.floor(second / interval);
if (obj[index]) {
obj[index] = obj[index].add(healing.regular, healing.absorbed, healing.overheal);
} else {
obj[index] = healing;
}
return obj;
}, {});
}
render() {
const { start, end, offset, healingBySecond, manaUpdates } = this.props;
// TODO: move this to vega-lite window transform
// e.g. { window: [{op: 'mean', field: 'hps', as: 'hps'}], frame: [-2, 2] }
const interval = 5;
const healingPerFrame = this.groupHealingBySeconds(healingBySecond, interval);
let max = 0;
Object.keys(healingPerFrame)
.map(k => healingPerFrame[k])
.forEach((healingDone) => {
const current = healingDone.effective;
if (current > max) {
max = current;
}
});
max /= interval;
const manaUsagePerFrame = {
0: 0,
};
const manaLevelPerFrame = {
0: 1,
};
manaUpdates.forEach((item) => {
const frame = Math.floor((item.timestamp - start) / 1000 / interval);
manaUsagePerFrame[frame] = (manaUsagePerFrame[frame] || 0) + item.used / item.max;
manaLevelPerFrame[frame] = item.current / item.max; // use the lowest value of the frame; likely to be more accurate
});
const fightDurationSec = Math.ceil((end - start) / 1000);
const labels = [];
for (let i = 0; i <= fightDurationSec / interval; i += 1) {
labels.push(Math.ceil(offset/1000) + i * interval);
healingPerFrame[i] = healingPerFrame[i] !== undefined ? healingPerFrame[i].effective : 0;
manaUsagePerFrame[i] = manaUsagePerFrame[i] !== undefined ? manaUsagePerFrame[i] : 0;
manaLevelPerFrame[i] = manaLevelPerFrame[i] !== undefined ? manaLevelPerFrame[i] : null;
}
let lastKnown = null;
const mana = Object.values(manaLevelPerFrame).map((value, i) => {
if (value !== null) {
lastKnown = value;
}
return {
x: labels[i],
y: lastKnown * max,
};
});
const healing = Object.values(healingPerFrame).map((value, i) => ({ x: labels[i], y: value / interval }));
const manaUsed = Object.values(manaUsagePerFrame).map((value, i) => ({ x: labels[i], y: value * max }));
return (
<div className="graph-container" style={{ marginBottom: 20 }}>
<ManaUsageGraph
mana={mana}
healing={healing}
manaUsed={manaUsed}
/>
</div>
);
}<|fim▁hole|><|fim▁end|> | }
export default HealingDoneGraph; |
<|file_name|>indexentries.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
sphinx.environment.managers.indexentries
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Index entries manager for sphinx.environment.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import bisect
import unicodedata
import string
from itertools import groupby
from six import text_type
from sphinx import addnodes
from sphinx.util import iteritems, split_index_msg, split_into
from sphinx.locale import _
from sphinx.environment.managers import EnvironmentManager
class IndexEntries(EnvironmentManager):
name = 'indices'
def __init__(self, env):
super(IndexEntries, self).__init__(env)
self.data = env.indexentries
def clear_doc(self, docname):
self.data.pop(docname, None)
def merge_other(self, docnames, other):
for docname in docnames:
self.data[docname] = other.indexentries[docname]
def process_doc(self, docname, doctree):
entries = self.data[docname] = []
for node in doctree.traverse(addnodes.index):
try:
for entry in node['entries']:
split_index_msg(entry[0], entry[1])
except ValueError as exc:
self.env.warn_node(exc, node)
node.parent.remove(node)
else:
for entry in node['entries']:
if len(entry) == 5:
# Since 1.4: new index structure including index_key (5th column)
entries.append(entry)
else:
entries.append(entry + (None,))
def create_index(self, builder, group_entries=True,
_fixre=re.compile(r'(.*) ([(][^()]*[)])')):
"""Create the real index from the collected index entries."""
from sphinx.environment import NoUri
new = {}
def add_entry(word, subword, main, link=True, dic=new, key=None):
# Force the word to be unicode if it's a ASCII bytestring.
# This will solve problems with unicode normalization later.
# For instance the RFC role will add bytestrings at the moment
word = text_type(word)
entry = dic.get(word)
if not entry:
dic[word] = entry = [[], {}, key]
if subword:
add_entry(subword, '', main, link=link, dic=entry[1], key=key)
elif link:
try:
uri = builder.get_relative_uri('genindex', fn) + '#' + tid
except NoUri:
pass
else:
# maintain links in sorted/deterministic order
bisect.insort(entry[0], (main, uri))
for fn, entries in iteritems(self.data):
# new entry types must be listed in directives/other.py!
for type, value, tid, main, index_key in entries:
try:
if type == 'single':
try:
entry, subentry = split_into(2, 'single', value)
except ValueError:
entry, = split_into(1, 'single', value)
subentry = ''
add_entry(entry, subentry, main, key=index_key)
elif type == 'pair':
first, second = split_into(2, 'pair', value)
add_entry(first, second, main, key=index_key)
add_entry(second, first, main, key=index_key)
elif type == 'triple':
first, second, third = split_into(3, 'triple', value)
add_entry(first, second + ' ' + third, main, key=index_key)
add_entry(second, third + ', ' + first, main, key=index_key)
add_entry(third, first + ' ' + second, main, key=index_key)
elif type == 'see':
first, second = split_into(2, 'see', value)
add_entry(first, _('see %s') % second, None,
link=False, key=index_key)
elif type == 'seealso':
first, second = split_into(2, 'see', value)
add_entry(first, _('see also %s') % second, None,
link=False, key=index_key)
else:
self.env.warn(fn, 'unknown index entry type %r' % type)
except ValueError as err:
self.env.warn(fn, str(err))
# sort the index entries; put all symbols at the front, even those
# following the letters in ASCII, this is where the chr(127) comes from
def keyfunc(entry, lcletters=string.ascii_lowercase + '_'):
key, (void, void, category_key) = entry
if category_key:
# using specified category key to sort
key = category_key
lckey = unicodedata.normalize('NFD', key.lower())
if lckey[0:1] in lcletters:
lckey = chr(127) + lckey
        # ensure a deterministic order *within* letters by also sorting on
# the entry itself
return (lckey, entry[0])
newlist = sorted(new.items(), key=keyfunc)
if group_entries:
# fixup entries: transform
# func() (in module foo)
# func() (in module bar)
# into
# func()
# (in module foo)
# (in module bar)
oldkey = ''
oldsubitems = None
i = 0
while i < len(newlist):
key, (targets, subitems, _key) = newlist[i]
# cannot move if it has subitems; structure gets too complex
if not subitems:
m = _fixre.match(key)
if m:
if oldkey == m.group(1):
# prefixes match: add entry as subitem of the
# previous entry
oldsubitems.setdefault(m.group(2), [[], {}, _key])[0].\
extend(targets)
del newlist[i]
continue
oldkey = m.group(1)
else:
oldkey = key
oldsubitems = subitems
i += 1
# group the entries by letter
def keyfunc2(item, letters=string.ascii_uppercase + '_'):<|fim▁hole|> k, v = item
v[1] = sorted((si, se) for (si, (se, void, void)) in iteritems(v[1]))
if v[2] is None:
# now calculate the key
letter = unicodedata.normalize('NFD', k[0])[0].upper()
if letter in letters:
return letter
else:
# get all other symbols under one heading
return _('Symbols')
else:
return v[2]
return [(key_, list(group))
for (key_, group) in groupby(newlist, keyfunc2)]<|fim▁end|> | # hack: mutating the subitems dicts to a list in the keyfunc |
<|file_name|>build.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>// Copyright © 2015, Peter Atashian
// Licensed under the MIT License <LICENSE.md>
extern crate build;
fn main() {
build::link("wow32", true)
}<|fim▁end|> | |
<|file_name|>prompt.py<|end_file_name|><|fim▁begin|>"""
Enables interactivity for CLI operations
"""
import sys
<|fim▁hole|>
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is one of "yes" or "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")<|fim▁end|> | def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer. |
<|file_name|>geojsonsource.js<|end_file_name|><|fim▁begin|>goog.provide('ol.source.GeoJSON');
goog.require('ol.format.GeoJSON');
goog.require('ol.source.StaticVector');
/**
* @classdesc
* Static vector source in GeoJSON format
*
<|fim▁hole|> * @constructor
* @extends {ol.source.StaticVector}
* @fires ol.source.VectorEvent
* @param {olx.source.GeoJSONOptions=} opt_options Options.
* @api
*/
ol.source.GeoJSON = function(opt_options) {
var options = goog.isDef(opt_options) ? opt_options : {};
goog.base(this, {
attributions: options.attributions,
extent: options.extent,
format: new ol.format.GeoJSON({
defaultDataProjection: options.defaultProjection
}),
logo: options.logo,
object: options.object,
projection: options.projection,
text: options.text,
url: options.url,
urls: options.urls
});
};
goog.inherits(ol.source.GeoJSON, ol.source.StaticVector);<|fim▁end|> | |
<|file_name|>attachment.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <[email protected]>
# Copyright (C) 2005 Christopher Lenz <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <[email protected]>
# Christopher Lenz <[email protected]>
from cStringIO import StringIO
from datetime import datetime
import errno
import hashlib
import os.path
import posixpath
import re
import shutil
import sys
import unicodedata
from genshi.builder import tag
from trac.admin import AdminCommandError, IAdminCommandProvider, PrefixList, \
console_datetime_format, get_dir_list
from trac.config import BoolOption, IntOption
from trac.core import *
from trac.mimeview import *
from trac.perm import PermissionError, IPermissionPolicy
from trac.resource import *
from trac.search import search_to_sql, shorten_result
from trac.util import content_disposition, create_zipinfo, get_reporter_id
from trac.util.datefmt import datetime_now, format_datetime, from_utimestamp, \
to_datetime, to_utimestamp, utc
from trac.util.text import exception_to_unicode, path_to_unicode, \
pretty_size, print_table, stripws, unicode_unquote
from trac.util.translation import _, tag_
from trac.web import HTTPBadRequest, IRequestHandler, RequestDone
from trac.web.chrome import (INavigationContributor, add_ctxtnav, add_link,
add_stylesheet, web_context, add_warning)
from trac.web.href import Href
from trac.wiki.api import IWikiSyntaxProvider
from trac.wiki.formatter import format_to
class InvalidAttachment(TracError):
"""Exception raised when attachment validation fails."""
class IAttachmentChangeListener(Interface):
"""Extension point interface for components that require
notification when attachments are created or deleted."""
def attachment_added(attachment):
"""Called when an attachment is added."""
def attachment_deleted(attachment):
"""Called when an attachment is deleted."""
def attachment_reparented(attachment, old_parent_realm, old_parent_id):
"""Called when an attachment is reparented."""
class IAttachmentManipulator(Interface):
"""Extension point interface for components that need to
manipulate attachments.
Unlike change listeners, a manipulator can reject changes being
committed to the database."""
def prepare_attachment(req, attachment, fields):
"""Not currently called, but should be provided for future
compatibility."""
def validate_attachment(req, attachment):
"""Validate an attachment after upload but before being stored
in Trac environment.
Must return a list of ``(field, message)`` tuples, one for
each problem detected. ``field`` can be any of
``description``, ``username``, ``filename``, ``content``, or
`None` to indicate an overall problem with the
attachment. Therefore, a return value of ``[]`` means
everything is OK."""
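# A minimal sketch of a manipulator honouring the contract above (hypothetical
# plugin component, not part of Trac itself):
#
#   class AttachmentExtensionFilter(Component):
#       implements(IAttachmentManipulator)
#
#       def prepare_attachment(self, req, attachment, fields):
#           pass
#
#       def validate_attachment(self, req, attachment):
#           problems = []
#           if attachment.filename and attachment.filename.endswith('.exe'):
#               problems.append(('filename',
#                                _("Executable attachments are not allowed")))
#           return problems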
class ILegacyAttachmentPolicyDelegate(Interface):
"""Interface that can be used by plugins to seamlessly participate
to the legacy way of checking for attachment permissions.
This should no longer be necessary once it becomes easier to
setup fine-grained permissions in the default permission store.
"""
def check_attachment_permission(action, username, resource, perm):
"""Return the usual `True`/`False`/`None` security policy
decision appropriate for the requested action on an
attachment.
:param action: one of ATTACHMENT_VIEW, ATTACHMENT_CREATE,
ATTACHMENT_DELETE
:param username: the user string
:param resource: the `~trac.resource.Resource` for the
attachment. Note that when
ATTACHMENT_CREATE is checked, the
resource ``.id`` will be `None`.
:param perm: the permission cache for that username and resource
"""
class AttachmentModule(Component):
implements(IRequestHandler, INavigationContributor, IWikiSyntaxProvider,
IResourceManager)
realm = 'attachment'
is_valid_default_handler = False
change_listeners = ExtensionPoint(IAttachmentChangeListener)
manipulators = ExtensionPoint(IAttachmentManipulator)
CHUNK_SIZE = 4096
max_size = IntOption('attachment', 'max_size', 262144,
"""Maximum allowed file size (in bytes) for attachments.""")
max_zip_size = IntOption('attachment', 'max_zip_size', 2097152,
"""Maximum allowed total size (in bytes) for an attachment list to be
downloadable as a `.zip`. Set this to -1 to disable download as `.zip`.
(''since 1.0'')""")
render_unsafe_content = BoolOption('attachment', 'render_unsafe_content',
'false',
"""Whether attachments should be rendered in the browser, or
only made downloadable.
Pretty much any file may be interpreted as HTML by the browser,
which allows a malicious user to attach a file containing cross-site
scripting attacks.
For public sites where anonymous users can create attachments it is
recommended to leave this option disabled.""")
# INavigationContributor methods
def get_active_navigation_item(self, req):
return req.args.get('realm')
def get_navigation_items(self, req):
return []
# IRequestHandler methods
def match_request(self, req):
match = re.match(r'/(raw-|zip-)?attachment/([^/]+)(?:/(.*))?$',
req.path_info)
if match:
format, realm, path = match.groups()
if format:
req.args['format'] = format[:-1]
req.args['realm'] = realm
if path:
req.args['path'] = path
return True
def process_request(self, req):
parent_id = None
parent_realm = req.args.get('realm')
path = req.args.get('path')
filename = None
if not parent_realm or not path:
raise HTTPBadRequest(_('Bad request'))
if parent_realm == 'attachment':
raise TracError(tag_("%(realm)s is not a valid parent realm",
realm=tag.code(parent_realm)))
parent_realm = Resource(parent_realm)
action = req.args.get('action', 'view')
if action == 'new':
parent_id = path.rstrip('/')
else:
last_slash = path.rfind('/')
if last_slash == -1:
parent_id, filename = path, ''
else:
parent_id, filename = path[:last_slash], path[last_slash + 1:]
parent = parent_realm(id=parent_id)
if not resource_exists(self.env, parent):
raise ResourceNotFound(
_("Parent resource %(parent)s doesn't exist",
parent=get_resource_name(self.env, parent)))
# Link the attachment page to parent resource
parent_name = get_resource_name(self.env, parent)
parent_url = get_resource_url(self.env, parent, req.href)
add_link(req, 'up', parent_url, parent_name)
add_ctxtnav(req, _('Back to %(parent)s', parent=parent_name),
parent_url)
if not filename: # there's a trailing '/'
if req.args.get('format') == 'zip':
self._download_as_zip(req, parent)
elif action != 'new':
return self._render_list(req, parent)
attachment = Attachment(self.env, parent.child(self.realm, filename))
if req.method == 'POST':
if action == 'new':
data = self._do_save(req, attachment)
elif action == 'delete':
self._do_delete(req, attachment)
else:
raise HTTPBadRequest(_("Invalid request arguments."))
elif action == 'delete':
data = self._render_confirm_delete(req, attachment)
elif action == 'new':
data = self._render_form(req, attachment)
else:
data = self._render_view(req, attachment)
add_stylesheet(req, 'common/css/code.css')
return 'attachment.html', data, None
# IWikiSyntaxProvider methods
def get_wiki_syntax(self):
return []
def get_link_resolvers(self):
yield ('raw-attachment', self._format_link)
yield ('attachment', self._format_link)
# Public methods
def viewable_attachments(self, context):
"""Return the list of viewable attachments in the given context.
:param context: the `~trac.mimeview.api.RenderingContext`
corresponding to the parent
`~trac.resource.Resource` for the attachments
"""
parent = context.resource
attachments = []
for attachment in Attachment.select(self.env, parent.realm, parent.id):
if 'ATTACHMENT_VIEW' in context.perm(attachment.resource):
attachments.append(attachment)
return attachments
def attachment_data(self, context):
"""Return a data dictionary describing the list of viewable
attachments in the current context.
"""
attachments = self.viewable_attachments(context)
parent = context.resource
total_size = sum(attachment.size for attachment in attachments)
new_att = parent.child(self.realm)
return {'attach_href': get_resource_url(self.env, new_att,
context.href),
'download_href': get_resource_url(self.env, new_att,
context.href, format='zip')
if total_size <= self.max_zip_size else None,
'can_create': 'ATTACHMENT_CREATE' in context.perm(new_att),
'attachments': attachments,
'parent': context.resource}
def get_history(self, start, stop, realm):
"""Return an iterable of tuples describing changes to attachments on
a particular object realm.
The tuples are in the form (change, realm, id, filename, time,
description, author). `change` can currently only be `created`.
FIXME: no iterator
"""
for realm, id, filename, ts, description, author in \
self.env.db_query("""
SELECT type, id, filename, time, description, author
FROM attachment WHERE time > %s AND time < %s AND type = %s
""", (to_utimestamp(start), to_utimestamp(stop), realm)):
time = from_utimestamp(ts or 0)
yield ('created', realm, id, filename, time, description, author)
def get_timeline_events(self, req, resource_realm, start, stop):
"""Return an event generator suitable for ITimelineEventProvider.
Events are changes to attachments on resources of the given
`resource_realm.realm`.
"""
for change, realm, id, filename, time, descr, author in \
self.get_history(start, stop, resource_realm.realm):
attachment = resource_realm(id=id).child(self.realm, filename)
if 'ATTACHMENT_VIEW' in req.perm(attachment):
yield ('attachment', time, author, (attachment, descr), self)
def render_timeline_event(self, context, field, event):
attachment, descr = event[3]
if field == 'url':
return self.get_resource_url(attachment, context.href)
elif field == 'title':
name = get_resource_name(self.env, attachment.parent)
title = get_resource_summary(self.env, attachment.parent)
return tag_("%(attachment)s attached to %(resource)s",
attachment=tag.em(os.path.basename(attachment.id)),
resource=tag.em(name, title=title))
elif field == 'description':
return format_to(self.env, None, context.child(attachment.parent),
descr)
def get_search_results(self, req, resource_realm, terms):
"""Return a search result generator suitable for ISearchSource.
Search results are attachments on resources of the given
`resource_realm.realm` whose filename, description or author match
the given terms.
"""
with self.env.db_query as db:
sql_query, args = search_to_sql(
db, ['filename', 'description', 'author'], terms)
for id, time, filename, desc, author in db("""
SELECT id, time, filename, description, author
FROM attachment WHERE type = %s AND """ + sql_query,
(resource_realm.realm,) + args):
attachment = resource_realm(id=id).child(self.realm, filename)
if 'ATTACHMENT_VIEW' in req.perm(attachment):
yield (get_resource_url(self.env, attachment, req.href),
get_resource_shortname(self.env, attachment),
from_utimestamp(time), author,
shorten_result(desc, terms))
# IResourceManager methods
def get_resource_realms(self):
yield self.realm
def get_resource_url(self, resource, href, **kwargs):
"""Return an URL to the attachment itself.
A `format` keyword argument equal to `'raw'` will be converted
to the raw-attachment prefix.
"""
if not resource.parent:
return None
format = kwargs.get('format')
prefix = 'attachment'
if format in ('raw', 'zip'):
kwargs.pop('format')
prefix = format + '-attachment'
parent_href = unicode_unquote(get_resource_url(self.env,
resource.parent(version=None), Href('')))
if not resource.id:
# link to list of attachments, which must end with a trailing '/'
# (see process_request)
return href(prefix, parent_href, '', **kwargs)
else:
return href(prefix, parent_href, resource.id, **kwargs)
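    # For illustration (hypothetical ids, not taken from the original source):
    # an attachment 'foo.txt' on ticket 123 yields URLs of the form
    #   /attachment/ticket/123/foo.txt        (default)
    #   /raw-attachment/ticket/123/foo.txt    (format='raw')
    #   /zip-attachment/ticket/123/           (resource.id is None, format='zip')
    # i.e. the same shapes that match_request() parses above.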
def get_resource_description(self, resource, format=None, **kwargs):
if not resource.parent:
return _("Unparented attachment %(id)s", id=resource.id)
if format == 'compact':
return '%s (%s)' % (resource.id,
get_resource_name(self.env, resource.parent))
elif format == 'summary':
return Attachment(self.env, resource).description
if resource.id:
return _("Attachment '%(id)s' in %(parent)s", id=resource.id,
parent=get_resource_name(self.env, resource.parent))
else:
return _("Attachments of %(parent)s",
parent=get_resource_name(self.env, resource.parent))
def resource_exists(self, resource):
try:
attachment = Attachment(self.env, resource)
return os.path.exists(attachment.path)
except ResourceNotFound:
return False
# Internal methods
def _do_save(self, req, attachment):
req.perm(attachment.resource).require('ATTACHMENT_CREATE')
parent_resource = attachment.resource.parent
if 'cancel' in req.args:
req.redirect(get_resource_url(self.env, parent_resource, req.href))
upload = req.args.getfirst('attachment')
if not hasattr(upload, 'filename') or not upload.filename:
raise TracError(_("No file uploaded"))
if hasattr(upload.file, 'fileno'):
size = os.fstat(upload.file.fileno())[6]
else:
upload.file.seek(0, 2) # seek to end of file
size = upload.file.tell()
upload.file.seek(0)
if size == 0:
raise TracError(_("Can't upload empty file"))
# Maximum attachment size (in bytes)
max_size = self.max_size
if 0 <= max_size < size:
raise TracError(_("Maximum attachment size: %(num)s",
num=pretty_size(max_size)), _("Upload failed"))
filename = _normalized_filename(upload.filename)
if not filename:
raise TracError(_("No file uploaded"))
# Now the filename is known, update the attachment resource
attachment.filename = filename
attachment.description = req.args.get('description', '')
attachment.author = get_reporter_id(req, 'author')
attachment.ipnr = req.remote_addr
# Validate attachment
valid = True
for manipulator in self.manipulators:
for field, message in manipulator.validate_attachment(req,
attachment):
valid = False
if field:
add_warning(req,
_('Attachment field %(field)s is invalid: %(message)s',
field=field, message=message))
else:
add_warning(req,
_('Invalid attachment: %(message)s', message=message))
if not valid:
# Display the attach form with pre-existing data
# NOTE: Local file path not known, file field cannot be repopulated
add_warning(req, _('Note: File must be selected again.'))
data = self._render_form(req, attachment)
data['is_replace'] = req.args.get('replace')
return data
if req.args.get('replace'):
try:
old_attachment = Attachment(self.env,
attachment.resource(id=filename))
if not (req.authname and req.authname != 'anonymous'
and old_attachment.author == req.authname) \
and 'ATTACHMENT_DELETE' \
not in req.perm(attachment.resource):
raise PermissionError(msg=_("You don't have permission to "
"replace the attachment %(name)s. You can only "
"replace your own attachments. Replacing other's "
"attachments requires ATTACHMENT_DELETE permission.",
name=filename))
if (not attachment.description.strip() and
old_attachment.description):
attachment.description = old_attachment.description
old_attachment.delete()
except TracError:
pass # don't worry if there's nothing to replace
attachment.insert(filename, upload.file, size)
req.redirect(get_resource_url(self.env, attachment.resource(id=None),
req.href))
def _do_delete(self, req, attachment):
req.perm(attachment.resource).require('ATTACHMENT_DELETE')
parent_href = get_resource_url(self.env, attachment.resource.parent,
req.href)
if 'cancel' in req.args:
req.redirect(parent_href)
attachment.delete()
req.redirect(parent_href)
def _render_confirm_delete(self, req, attachment):
req.perm(attachment.resource).require('ATTACHMENT_DELETE')
return {'mode': 'delete',
'title': _('%(attachment)s (delete)',
attachment=get_resource_name(self.env,
attachment.resource)),
'attachment': attachment}
def _render_form(self, req, attachment):
req.perm(attachment.resource).require('ATTACHMENT_CREATE')
return {'mode': 'new', 'author': get_reporter_id(req),
'attachment': attachment, 'max_size': self.max_size}
def _download_as_zip(self, req, parent, attachments=None):
if attachments is None:
attachments = self.viewable_attachments(web_context(req, parent))
total_size = sum(attachment.size for attachment in attachments)
if total_size > self.max_zip_size:
raise TracError(_("Maximum total attachment size: %(num)s",
num=pretty_size(self.max_zip_size)), _("Download failed"))
req.send_response(200)
req.send_header('Content-Type', 'application/zip')
filename = 'attachments-%s-%s.zip' % \
(parent.realm, re.sub(r'[/\\:]', '-', unicode(parent.id)))
req.send_header('Content-Disposition',
content_disposition('inline', filename))
from zipfile import ZipFile, ZIP_DEFLATED
buf = StringIO()
zipfile = ZipFile(buf, 'w', ZIP_DEFLATED)
for attachment in attachments:
zipinfo = create_zipinfo(attachment.filename,
mtime=attachment.date,
comment=attachment.description)
try:
with attachment.open() as fd:
zipfile.writestr(zipinfo, fd.read())
except ResourceNotFound:
pass # skip missing files
zipfile.close()
zip_str = buf.getvalue()
req.send_header("Content-Length", len(zip_str))
req.end_headers()
req.write(zip_str)
raise RequestDone()
def _render_list(self, req, parent):
data = {
'mode': 'list',
'attachment': None, # no specific attachment
'attachments': self.attachment_data(web_context(req, parent))
}
return 'attachment.html', data, None
def _render_view(self, req, attachment):
req.perm(attachment.resource).require('ATTACHMENT_VIEW')
can_delete = 'ATTACHMENT_DELETE' in req.perm(attachment.resource)
req.check_modified(attachment.date, str(can_delete))
data = {'mode': 'view',
'title': get_resource_name(self.env, attachment.resource),
'attachment': attachment}
with attachment.open() as fd:
mimeview = Mimeview(self.env)
# MIME type detection
str_data = fd.read(1000)
fd.seek(0)
mime_type = mimeview.get_mimetype(attachment.filename, str_data)
# Eventually send the file directly
format = req.args.get('format')
if format == 'zip':
self._download_as_zip(req, attachment.resource.parent,
[attachment])
elif format in ('raw', 'txt'):
if not self.render_unsafe_content:
# Force browser to download files instead of rendering
# them, since they might contain malicious code enabling
# XSS attacks
req.send_header('Content-Disposition', 'attachment')
if format == 'txt':
mime_type = 'text/plain'
elif not mime_type:
mime_type = 'application/octet-stream'
if 'charset=' not in mime_type:
charset = mimeview.get_charset(str_data, mime_type)
mime_type = mime_type + '; charset=' + charset
req.send_file(attachment.path, mime_type)
# add ''Plain Text'' alternate link if needed
if (self.render_unsafe_content and
mime_type and not mime_type.startswith('text/plain')):
plaintext_href = get_resource_url(self.env,
attachment.resource,
req.href, format='txt')
add_link(req, 'alternate', plaintext_href, _('Plain Text'),
mime_type)
# add ''Original Format'' alternate link (always)
raw_href = get_resource_url(self.env, attachment.resource,
req.href, format='raw')
add_link(req, 'alternate', raw_href, _('Original Format'),
mime_type)
self.log.debug("Rendering preview of file %s with mime-type %s",
attachment.filename, mime_type)
data['preview'] = mimeview.preview_data(
web_context(req, attachment.resource), fd,
os.fstat(fd.fileno()).st_size, mime_type,
attachment.filename, raw_href, annotations=['lineno'])
return data
def _format_link(self, formatter, ns, target, label):
link, params, fragment = formatter.split_link(target)
ids = link.split(':', 2)
attachment = None
if len(ids) == 3:
known_realms = ResourceSystem(self.env).get_known_realms()
# new-style attachment: TracLinks (filename:realm:id)
if ids[1] in known_realms:
attachment = Resource(ids[1], ids[2]).child(self.realm,
ids[0])
else: # try old-style attachment: TracLinks (realm:id:filename)
if ids[0] in known_realms:
attachment = Resource(ids[0], ids[1]).child(self.realm,
ids[2])
else: # local attachment: TracLinks (filename)
attachment = formatter.resource.child(self.realm, link)
if attachment and 'ATTACHMENT_VIEW' in formatter.perm(attachment):
try:
model = Attachment(self.env, attachment)
raw_href = get_resource_url(self.env, attachment,
formatter.href, format='raw')
if ns.startswith('raw'):
return tag.a(label, class_='attachment',
href=raw_href + params,
title=get_resource_name(self.env, attachment))
href = get_resource_url(self.env, attachment, formatter.href)
title = get_resource_name(self.env, attachment)
return tag(tag.a(label, class_='attachment', title=title,
href=href + params),
tag.a(u'\u200b', class_='trac-rawlink',
href=raw_href + params, title=_("Download")))
except ResourceNotFound:
pass
# FIXME: should be either:
#
# model = Attachment(self.env, attachment)
# if model.exists:
# ...
#
# or directly:
#
# if attachment.exists:
#
# (related to #4130)
return tag.a(label, class_='missing attachment')
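    # Rough sketch of the wiki link forms resolved above (names invented for
    # illustration): 'attachment:notes.txt' resolves against the current
    # resource, 'attachment:notes.txt:ticket:123' is the new-style form
    # (filename:realm:id), 'attachment:ticket:123:notes.txt' the old-style
    # form, and a 'raw-attachment:' prefix links straight to the download.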
class Attachment(object):
"""Represents an attachment (new or existing).
:since 1.0.5: `ipnr` is deprecated and will be removed in 1.3.1
"""
realm = AttachmentModule.realm
@property
def resource(self):
return Resource(self.parent_realm, self.parent_id) \
.child(self.realm, self.filename)
def __init__(self, env, parent_realm_or_attachment_resource,
parent_id=None, filename=None):
if isinstance(parent_realm_or_attachment_resource, Resource):
resource = parent_realm_or_attachment_resource
self.parent_realm = resource.parent.realm
self.parent_id = unicode(resource.parent.id)
self.filename = resource.id
else:
self.parent_realm = parent_realm_or_attachment_resource
self.parent_id = unicode(parent_id)
self.filename = filename
self.env = env
if self.filename:
self._fetch(self.filename)
else:
self.filename = None
self.description = None
self.size = None
self.date = None
self.author = None
self.ipnr = None
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self.filename)
def _from_database(self, filename, description, size, time, author, ipnr):
self.filename = filename
self.description = description
self.size = int(size) if size else 0
self.date = from_utimestamp(time or 0)
self.author = author<|fim▁hole|>
def _fetch(self, filename):
for row in self.env.db_query("""
SELECT filename, description, size, time, author, ipnr
FROM attachment WHERE type=%s AND id=%s AND filename=%s
ORDER BY time
""", (self.parent_realm, unicode(self.parent_id), filename)):
self._from_database(*row)
break
else:
self.filename = filename
raise ResourceNotFound(_("Attachment '%(title)s' does not exist.",
title=self.title),
_('Invalid Attachment'))
# _get_path() and _get_hashed_filename() are class methods so that they
# can be used in db28.py.
@classmethod
def _get_path(cls, env_path, parent_realm, parent_id, filename):
"""Get the path of an attachment.
WARNING: This method is used by db28.py for moving attachments from
the old "attachments" directory to the "files" directory. Please check
all changes so that they don't break the upgrade.
"""
path = os.path.join(env_path, 'files', 'attachments',
parent_realm)
hash = hashlib.sha1(parent_id.encode('utf-8')).hexdigest()
path = os.path.join(path, hash[0:3], hash)
if filename:
path = os.path.join(path, cls._get_hashed_filename(filename))
return os.path.normpath(path)
_extension_re = re.compile(r'\.[A-Za-z0-9]+\Z')
@classmethod
def _get_hashed_filename(cls, filename):
"""Get the hashed filename corresponding to the given filename.
WARNING: This method is used by db28.py for moving attachments from
the old "attachments" directory to the "files" directory. Please check
all changes so that they don't break the upgrade.
"""
hash = hashlib.sha1(filename.encode('utf-8')).hexdigest()
match = cls._extension_re.search(filename)
return hash + match.group(0) if match else hash
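    # Illustrative layout only (digests abbreviated, not real SHA-1 values):
    # for parent_realm='ticket', parent_id='123', filename='notes.txt',
    # _get_path() produces roughly
    #   <env>/files/attachments/ticket/<sha1('123')[0:3]>/<sha1('123')>/<sha1('notes.txt')>.txt
    # so the directory is derived from the parent id's hash and the stored
    # file keeps only its hashed name plus the original extension.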
@property
def path(self):
return self._get_path(self.env.path, self.parent_realm, self.parent_id,
self.filename)
@property
def title(self):
return '%s:%s: %s' % (self.parent_realm, self.parent_id, self.filename)
def delete(self):
"""Delete the attachment, both the record in the database and
the file itself.
"""
assert self.filename, "Cannot delete non-existent attachment"
with self.env.db_transaction as db:
db("""
DELETE FROM attachment WHERE type=%s AND id=%s AND filename=%s
""", (self.parent_realm, self.parent_id, self.filename))
path = self.path
if os.path.isfile(path):
try:
os.unlink(path)
except OSError as e:
self.env.log.error("Failed to delete attachment "
"file %s: %s",
path,
exception_to_unicode(e, traceback=True))
raise TracError(_("Could not delete attachment"))
self.env.log.info("Attachment removed: %s", self.title)
for listener in AttachmentModule(self.env).change_listeners:
listener.attachment_deleted(self)
def reparent(self, new_realm, new_id):
assert self.filename, "Cannot reparent non-existent attachment"
new_id = unicode(new_id)
new_path = self._get_path(self.env.path, new_realm, new_id,
self.filename)
# Make sure the path to the attachment is inside the environment
# attachments directory
attachments_dir = os.path.join(os.path.normpath(self.env.path),
'files', 'attachments')
commonprefix = os.path.commonprefix([attachments_dir, new_path])
if commonprefix != attachments_dir:
raise TracError(_('Cannot reparent attachment "%(att)s" as '
'%(realm)s:%(id)s is invalid',
att=self.filename, realm=new_realm, id=new_id))
if os.path.exists(new_path):
raise TracError(_('Cannot reparent attachment "%(att)s" as '
'it already exists in %(realm)s:%(id)s',
att=self.filename, realm=new_realm, id=new_id))
with self.env.db_transaction as db:
db("""UPDATE attachment SET type=%s, id=%s
WHERE type=%s AND id=%s AND filename=%s
""", (new_realm, new_id, self.parent_realm, self.parent_id,
self.filename))
dirname = os.path.dirname(new_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
path = self.path
if os.path.isfile(path):
try:
os.rename(path, new_path)
except OSError as e:
self.env.log.error("Failed to move attachment file %s: %s",
path,
exception_to_unicode(e, traceback=True))
raise TracError(_("Could not reparent attachment %(name)s",
name=self.filename))
old_realm, old_id = self.parent_realm, self.parent_id
self.parent_realm, self.parent_id = new_realm, new_id
self.env.log.info("Attachment reparented: %s", self.title)
for listener in AttachmentModule(self.env).change_listeners:
if hasattr(listener, 'attachment_reparented'):
listener.attachment_reparented(self, old_realm, old_id)
def insert(self, filename, fileobj, size, t=None):
"""Create a new Attachment record and save the file content.
"""
self.size = int(size) if size else 0
self.filename = None
if t is None:
t = datetime_now(utc)
elif not isinstance(t, datetime): # Compatibility with 0.11
t = to_datetime(t, utc)
self.date = t
parent_resource = Resource(self.parent_realm, self.parent_id)
if not resource_exists(self.env, parent_resource):
raise ResourceNotFound(
_("%(parent)s doesn't exist, can't create attachment",
parent=get_resource_name(self.env, parent_resource)))
# Make sure the path to the attachment is inside the environment
# attachments directory
attachments_dir = os.path.join(os.path.normpath(self.env.path),
'files', 'attachments')
dir = self.path
commonprefix = os.path.commonprefix([attachments_dir, dir])
if commonprefix != attachments_dir:
raise TracError(_('Cannot create attachment "%(att)s" as '
'%(realm)s:%(id)s is invalid',
att=filename, realm=self.parent_realm,
id=self.parent_id))
if not os.access(dir, os.F_OK):
os.makedirs(dir)
filename, targetfile = self._create_unique_file(dir, filename)
with targetfile:
with self.env.db_transaction as db:
db("INSERT INTO attachment VALUES (%s,%s,%s,%s,%s,%s,%s,%s)",
(self.parent_realm, self.parent_id, filename, self.size,
to_utimestamp(t), self.description, self.author,
self.ipnr))
shutil.copyfileobj(fileobj, targetfile)
self.filename = filename
self.env.log.info("New attachment: %s by %s", self.title,
self.author)
for listener in AttachmentModule(self.env).change_listeners:
listener.attachment_added(self)
@classmethod
def select(cls, env, parent_realm, parent_id):
"""Iterator yielding all `Attachment` instances attached to
resource identified by `parent_realm` and `parent_id`.
        Each yielded `Attachment` is populated from the `filename`,
        `description`, `size`, `time`, `author` and `ipnr` columns.
:since 1.0.5: use of `ipnr` is deprecated and will be removed in 1.3.1
"""
for row in env.db_query("""
SELECT filename, description, size, time, author, ipnr
FROM attachment WHERE type=%s AND id=%s ORDER BY time
""", (parent_realm, unicode(parent_id))):
attachment = Attachment(env, parent_realm, parent_id)
attachment._from_database(*row)
yield attachment
@classmethod
def delete_all(cls, env, parent_realm, parent_id):
"""Delete all attachments of a given resource.
"""
attachment_dir = None
with env.db_transaction as db:
for attachment in cls.select(env, parent_realm, parent_id):
attachment_dir = os.path.dirname(attachment.path)
attachment.delete()
if attachment_dir:
try:
os.rmdir(attachment_dir)
except OSError as e:
env.log.error("Can't delete attachment directory %s: %s",
attachment_dir,
exception_to_unicode(e, traceback=True))
@classmethod
def reparent_all(cls, env, parent_realm, parent_id, new_realm, new_id):
"""Reparent all attachments of a given resource to another resource."""
attachment_dir = None
with env.db_transaction as db:
for attachment in list(cls.select(env, parent_realm, parent_id)):
attachment_dir = os.path.dirname(attachment.path)
attachment.reparent(new_realm, new_id)
if attachment_dir:
try:
os.rmdir(attachment_dir)
except OSError as e:
env.log.error("Can't delete attachment directory %s: %s",
attachment_dir,
exception_to_unicode(e, traceback=True))
def open(self):
path = self.path
self.env.log.debug('Trying to open attachment at %s', path)
try:
fd = open(path, 'rb')
except IOError:
raise ResourceNotFound(_("Attachment '%(filename)s' not found",
filename=self.filename))
return fd
def _create_unique_file(self, dir, filename):
parts = os.path.splitext(filename)
flags = os.O_CREAT + os.O_WRONLY + os.O_EXCL
if hasattr(os, 'O_BINARY'):
flags += os.O_BINARY
idx = 1
while 1:
path = os.path.join(dir, self._get_hashed_filename(filename))
try:
return filename, os.fdopen(os.open(path, flags, 0666), 'w')
except OSError as e:
if e.errno != errno.EEXIST:
raise
idx += 1
# A sanity check
if idx > 100:
raise Exception('Failed to create unique name: ' + path)
filename = '%s.%d%s' % (parts[0], idx, parts[1])
class LegacyAttachmentPolicy(Component):
implements(IPermissionPolicy)
delegates = ExtensionPoint(ILegacyAttachmentPolicyDelegate)
realm = AttachmentModule.realm
# IPermissionPolicy methods
_perm_maps = {
'ATTACHMENT_CREATE': {'ticket': 'TICKET_APPEND', 'wiki': 'WIKI_MODIFY',
'milestone': 'MILESTONE_MODIFY'},
'ATTACHMENT_VIEW': {'ticket': 'TICKET_VIEW', 'wiki': 'WIKI_VIEW',
'milestone': 'MILESTONE_VIEW'},
'ATTACHMENT_DELETE': {'ticket': 'TICKET_ADMIN', 'wiki': 'WIKI_DELETE',
'milestone': 'MILESTONE_DELETE'},
}
def check_permission(self, action, username, resource, perm):
perm_map = self._perm_maps.get(action)
if not perm_map or not resource or resource.realm != self.realm:
return
legacy_action = perm_map.get(resource.parent.realm)
if legacy_action:
decision = legacy_action in perm(resource.parent)
if not decision:
self.log.debug('LegacyAttachmentPolicy denied %s access to '
'%s. User needs %s',
username, resource, legacy_action)
return decision
else:
for d in self.delegates:
decision = d.check_attachment_permission(action, username,
resource, perm)
if decision is not None:
return decision
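    # Example of the mapping above: ATTACHMENT_CREATE on an attachment of a
    # ticket is granted iff TICKET_APPEND is granted on that ticket, and on a
    # wiki page iff WIKI_MODIFY is granted there; parent realms not listed in
    # _perm_maps fall through to the ILegacyAttachmentPolicyDelegate
    # implementations.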
class AttachmentAdmin(Component):
"""trac-admin command provider for attachment administration."""
implements(IAdminCommandProvider)
# IAdminCommandProvider methods
def get_admin_commands(self):
yield ('attachment list', '<realm:id>',
"""List attachments of a resource
The resource is identified by its realm and identifier.""",
self._complete_list, self._do_list)
yield ('attachment add', '<realm:id> <path> [author] [description]',
"""Attach a file to a resource
The resource is identified by its realm and identifier. The
attachment will be named according to the base name of the file.
""",
self._complete_add, self._do_add)
yield ('attachment remove', '<realm:id> <name>',
"""Remove an attachment from a resource
The resource is identified by its realm and identifier.""",
self._complete_remove, self._do_remove)
yield ('attachment export', '<realm:id> <name> [destination]',
"""Export an attachment from a resource to a file or stdout
The resource is identified by its realm and identifier. If no
destination is specified, the attachment is output to stdout.
""",
self._complete_export, self._do_export)
def get_realm_list(self):
rs = ResourceSystem(self.env)
return PrefixList([each + ":" for each in rs.get_known_realms()])
def split_resource(self, resource):
result = resource.split(':', 1)
if len(result) != 2:
raise AdminCommandError(_("Invalid resource identifier '%(id)s'",
id=resource))
return result
def get_attachment_list(self, resource):
(realm, id) = self.split_resource(resource)
return [a.filename for a in Attachment.select(self.env, realm, id)]
def _complete_list(self, args):
if len(args) == 1:
return self.get_realm_list()
def _complete_add(self, args):
if len(args) == 1:
return self.get_realm_list()
elif len(args) == 2:
return get_dir_list(args[1])
def _complete_remove(self, args):
if len(args) == 1:
return self.get_realm_list()
elif len(args) == 2:
return self.get_attachment_list(args[0])
def _complete_export(self, args):
if len(args) < 3:
return self._complete_remove(args)
elif len(args) == 3:
return get_dir_list(args[2])
def _do_list(self, resource):
(realm, id) = self.split_resource(resource)
print_table([(a.filename, pretty_size(a.size), a.author,
format_datetime(a.date, console_datetime_format),
a.description)
for a in Attachment.select(self.env, realm, id)],
[_('Name'), _('Size'), _('Author'), _('Date'),
_('Description')])
def _do_add(self, resource, path, author='trac', description=''):
(realm, id) = self.split_resource(resource)
attachment = Attachment(self.env, realm, id)
attachment.author = author
attachment.description = description
filename = _normalized_filename(os.path.basename(path))
with open(path, 'rb') as f:
attachment.insert(filename, f, os.path.getsize(path))
def _do_remove(self, resource, name):
(realm, id) = self.split_resource(resource)
attachment = Attachment(self.env, realm, id, name)
attachment.delete()
def _do_export(self, resource, name, destination=None):
(realm, id) = self.split_resource(resource)
attachment = Attachment(self.env, realm, id, name)
if destination is not None:
if os.path.isdir(destination):
destination = os.path.join(destination, name)
if os.path.isfile(destination):
raise AdminCommandError(_("File '%(name)s' exists",
name=path_to_unicode(destination)))
with attachment.open() as input:
output = open(destination, "wb") if destination is not None \
else sys.stdout
try:
shutil.copyfileobj(input, output)
finally:
if destination is not None:
output.close()
_control_codes_re = re.compile(
'[' +
''.join(filter(lambda c: unicodedata.category(c) == 'Cc',
map(unichr, xrange(0x10000)))) +
']')
def _normalized_filename(filepath):
# We try to normalize the filename to unicode NFC if we can.
# Files uploaded from OS X might be in NFD.
if not isinstance(filepath, unicode):
filepath = unicode(filepath, 'utf-8')
filepath = unicodedata.normalize('NFC', filepath)
# Replace control codes with spaces, e.g. NUL, LF, DEL, U+009F
filepath = _control_codes_re.sub(' ', filepath)
# Replace backslashes with slashes if filename is Windows full path
if filepath.startswith('\\') or re.match(r'[A-Za-z]:\\', filepath):
filepath = filepath.replace('\\', '/')
# We want basename to be delimited by only slashes on all platforms
filename = posixpath.basename(filepath)
filename = stripws(filename)
return filename<|fim▁end|> | self.ipnr = ipnr |
<|file_name|>ast.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The Rust abstract syntax tree.
use codemap::{Span, Spanned, DUMMY_SP};
use abi::Abi;
use ast_util;
use owned_slice::OwnedSlice;
use parse::token::{InternedString, str_to_ident};
use parse::token;
use std::fmt;
use std::num::Zero;
use std::fmt::Show;
use std::option::Option;
use std::rc::Rc;
use std::gc::{Gc, GC};
use serialize::{Encodable, Decodable, Encoder, Decoder};
/// A pointer abstraction.
// FIXME(eddyb) #10676 use Rc<T> in the future.
pub type P<T> = Gc<T>;
#[allow(non_snake_case)]
/// Construct a P<T> from a T value.
pub fn P<T: 'static>(value: T) -> P<T> {
box(GC) value
}
// FIXME #6993: in librustc, uses of "ident" should be replaced
// by just "Name".
/// An identifier contains a Name (index into the interner
/// table) and a SyntaxContext to track renaming and
/// macro expansion per Flatt et al., "Macros
/// That Work Together"
#[deriving(Clone, Hash, PartialOrd, Eq, Ord)]
pub struct Ident {
pub name: Name,
pub ctxt: SyntaxContext
}
impl Ident {
/// Construct an identifier with the given name and an empty context:
pub fn new(name: Name) -> Ident { Ident {name: name, ctxt: EMPTY_CTXT}}
pub fn as_str<'a>(&'a self) -> &'a str {
self.name.as_str()
}
pub fn encode_with_hygiene(&self) -> String {
format!("\x00name_{:u},ctxt_{:u}\x00",
self.name.uint(),
self.ctxt)
}
}
impl Show for Ident {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}#{}", self.name, self.ctxt)
}
}
impl Show for Name {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let Name(nm) = *self;
write!(f, "\"{}\"({})", token::get_name(*self).get(), nm)
}
}
impl PartialEq for Ident {
fn eq(&self, other: &Ident) -> bool {
if self.ctxt == other.ctxt {
self.name == other.name
} else {
// IF YOU SEE ONE OF THESE FAILS: it means that you're comparing
// idents that have different contexts. You can't fix this without
// knowing whether the comparison should be hygienic or non-hygienic.
// if it should be non-hygienic (most things are), just compare the
// 'name' fields of the idents. Or, even better, replace the idents
// with Name's.
//
// On the other hand, if the comparison does need to be hygienic,
// one example and its non-hygienic counterpart would be:
// syntax::parse::token::mtwt_token_eq
// syntax::ext::tt::macro_parser::token_name_eq
fail!("not allowed to compare these idents: {:?}, {:?}. \
Probably related to issue \\#6993", self, other);
}
}
fn ne(&self, other: &Ident) -> bool {
! self.eq(other)
}
}
/// A SyntaxContext represents a chain of macro-expandings
/// and renamings. Each macro expansion corresponds to
/// a fresh uint
// I'm representing this syntax context as an index into
// a table, in order to work around a compiler bug
// that's causing unreleased memory to cause core dumps
// and also perhaps to save some work in destructor checks.
// the special uint '0' will be used to indicate an empty
// syntax context.
// this uint is a reference to a table stored in thread-local
// storage.
pub type SyntaxContext = u32;
pub static EMPTY_CTXT : SyntaxContext = 0;
pub static ILLEGAL_CTXT : SyntaxContext = 1;
/// A name is a part of an identifier, representing a string or gensym. It's
/// the result of interning.
#[deriving(Eq, Ord, PartialEq, PartialOrd, Hash, Encodable, Decodable, Clone)]
pub struct Name(pub u32);
impl Name {
pub fn as_str<'a>(&'a self) -> &'a str {
unsafe {
// FIXME #12938: can't use copy_lifetime since &str isn't a &T
::std::mem::transmute(token::get_name(*self).get())
}
}
pub fn uint(&self) -> uint {
let Name(nm) = *self;
nm as uint
}
pub fn ident(&self) -> Ident {
Ident { name: *self, ctxt: 0 }
}
}
/// A mark represents a unique id associated with a macro expansion
pub type Mrk = u32;
impl<S: Encoder<E>, E> Encodable<S, E> for Ident {
fn encode(&self, s: &mut S) -> Result<(), E> {
s.emit_str(token::get_ident(*self).get())
}
}
impl<D:Decoder<E>, E> Decodable<D, E> for Ident {
fn decode(d: &mut D) -> Result<Ident, E> {
Ok(str_to_ident(try!(d.read_str()).as_slice()))
}
}
/// Function name (not all functions have names)
pub type FnIdent = Option<Ident>;
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct Lifetime {
pub id: NodeId,
pub span: Span,
pub name: Name
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct LifetimeDef {
pub lifetime: Lifetime,
pub bounds: Vec<Lifetime>
}
/// A "Path" is essentially Rust's notion of a name; for instance:
/// std::cmp::PartialEq . It's represented as a sequence of identifiers,
/// along with a bunch of supporting information.
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct Path {
pub span: Span,
    /// A `::foo` path is relative to the crate root rather than the current
    /// module (like paths in an import).
pub global: bool,
/// The segments in the path: the things separated by `::`.
pub segments: Vec<PathSegment> ,
}
/// A segment of a path: an identifier, an optional lifetime, and a set of
/// types.
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct PathSegment {
/// The identifier portion of this path segment.
pub identifier: Ident,
/// The lifetime parameters for this path segment.
pub lifetimes: Vec<Lifetime>,
/// The type parameters for this path segment, if present.
pub types: OwnedSlice<P<Ty>>,
}
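// Illustrative only (values sketched by hand, not produced by the parser):
// the path `std::cmp::PartialEq` is three `PathSegment`s whose identifiers
// are "std", "cmp" and "PartialEq", each with empty `lifetimes` and `types`;
// writing `::std::cmp::PartialEq` would additionally set `global: true`.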
pub type CrateNum = u32;
pub type NodeId = u32;
#[deriving(Clone, Eq, Ord, PartialOrd, PartialEq, Encodable, Decodable, Hash, Show)]
pub struct DefId {
pub krate: CrateNum,
pub node: NodeId,
}
/// Item definitions in the currently-compiled crate would have the CrateNum
/// LOCAL_CRATE in their DefId.
pub static LOCAL_CRATE: CrateNum = 0;
pub static CRATE_NODE_ID: NodeId = 0;
/// When parsing and doing expansions, we initially give all AST nodes this AST
/// node value. Then later, in the renumber pass, we renumber them to have
/// small, positive ids.
pub static DUMMY_NODE_ID: NodeId = -1;
/// The AST represents all type param bounds as types.
/// typeck::collect::compute_bounds matches these against
/// the "special" built-in traits (see middle::lang_items) and
/// detects Copy, Send and Sync.
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum TyParamBound {
TraitTyParamBound(TraitRef),
UnboxedFnTyParamBound(UnboxedFnTy),
RegionTyParamBound(Lifetime)
}
pub type TyParamBounds = OwnedSlice<TyParamBound>;
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct TyParam {
pub ident: Ident,
pub id: NodeId,
pub bounds: TyParamBounds,
pub unbound: Option<TyParamBound>,
pub default: Option<P<Ty>>,
pub span: Span
}
/// Represents lifetimes and type parameters attached to a declaration
/// of a function, enum, trait, etc.
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct Generics {
pub lifetimes: Vec<LifetimeDef>,
pub ty_params: OwnedSlice<TyParam>,
pub where_clause: WhereClause,
}
impl Generics {
pub fn is_parameterized(&self) -> bool {
self.lifetimes.len() + self.ty_params.len() > 0
}
pub fn is_lt_parameterized(&self) -> bool {
self.lifetimes.len() > 0
}
pub fn is_type_parameterized(&self) -> bool {
self.ty_params.len() > 0
}
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct WhereClause {
pub id: NodeId,
pub predicates: Vec<WherePredicate>,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct WherePredicate {
pub id: NodeId,
pub span: Span,
pub ident: Ident,
pub bounds: OwnedSlice<TyParamBound>,
}
/// The set of MetaItems that define the compilation environment of the crate,
/// used to drive conditional compilation
pub type CrateConfig = Vec<Gc<MetaItem>>;
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct Crate {
pub module: Mod,
pub attrs: Vec<Attribute>,
pub config: CrateConfig,
pub span: Span,
pub exported_macros: Vec<Gc<Item>>
}
pub type MetaItem = Spanned<MetaItem_>;
#[deriving(Clone, Eq, Encodable, Decodable, Hash, Show)]
pub enum MetaItem_ {
MetaWord(InternedString),
MetaList(InternedString, Vec<Gc<MetaItem>>),
MetaNameValue(InternedString, Lit),
}
// can't be derived because the MetaList requires an unordered comparison
impl PartialEq for MetaItem_ {
fn eq(&self, other: &MetaItem_) -> bool {
match *self {
MetaWord(ref ns) => match *other {
MetaWord(ref no) => (*ns) == (*no),
_ => false
},
MetaNameValue(ref ns, ref vs) => match *other {
MetaNameValue(ref no, ref vo) => {
(*ns) == (*no) && vs.node == vo.node
}
_ => false
},
MetaList(ref ns, ref miss) => match *other {
MetaList(ref no, ref miso) => {
ns == no &&
miss.iter().all(|mi| miso.iter().any(|x| x.node == mi.node))
}
_ => false
}
}
}
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct Block {
pub view_items: Vec<ViewItem>,
pub stmts: Vec<Gc<Stmt>>,
pub expr: Option<Gc<Expr>>,
pub id: NodeId,
pub rules: BlockCheckMode,
pub span: Span,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct Pat {
pub id: NodeId,
pub node: Pat_,
pub span: Span,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct FieldPat {
pub ident: Ident,
pub pat: Gc<Pat>,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum BindingMode {
BindByRef(Mutability),
BindByValue(Mutability),
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum PatWildKind {
/// Represents the wildcard pattern `_`
PatWildSingle,
/// Represents the wildcard pattern `..`
PatWildMulti,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum Pat_ {
/// Represents a wildcard pattern (either `_` or `..`)
PatWild(PatWildKind),
/// A PatIdent may either be a new bound variable,
/// or a nullary enum (in which case the third field
/// is None).
/// In the nullary enum case, the parser can't determine
/// which it is. The resolver determines this, and
/// records this pattern's NodeId in an auxiliary
/// set (of "PatIdents that refer to nullary enums")
PatIdent(BindingMode, SpannedIdent, Option<Gc<Pat>>),
/// "None" means a * pattern where we don't bind the fields to names.
PatEnum(Path, Option<Vec<Gc<Pat>>>),
PatStruct(Path, Vec<FieldPat>, bool),
PatTup(Vec<Gc<Pat>>),
PatBox(Gc<Pat>),
PatRegion(Gc<Pat>), // reference pattern
PatLit(Gc<Expr>),
PatRange(Gc<Expr>, Gc<Expr>),
/// [a, b, ..i, y, z] is represented as:
/// PatVec(~[a, b], Some(i), ~[y, z])
PatVec(Vec<Gc<Pat>>, Option<Gc<Pat>>, Vec<Gc<Pat>>),
PatMac(Mac),
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum Mutability {
MutMutable,
MutImmutable,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum BinOp {
BiAdd,
BiSub,
BiMul,
BiDiv,
BiRem,
BiAnd,
BiOr,
BiBitXor,
BiBitAnd,
BiBitOr,
BiShl,
BiShr,
BiEq,
BiLt,
BiLe,
BiNe,
BiGe,
BiGt,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum UnOp {
UnBox,
UnUniq,
UnDeref,
UnNot,
UnNeg
}
pub type Stmt = Spanned<Stmt_>;
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum Stmt_ {
/// Could be an item or a local (let) binding:
StmtDecl(Gc<Decl>, NodeId),
/// Expr without trailing semi-colon (must have unit type):
StmtExpr(Gc<Expr>, NodeId),
/// Expr with trailing semi-colon (may have any type):
StmtSemi(Gc<Expr>, NodeId),
    /// bool: is there a trailing semicolon?
StmtMac(Mac, bool),
}
/// Where a local declaration came from: either a true `let ... =
/// ...;`, or one desugared from the pattern of a for loop.
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum LocalSource {
LocalLet,
LocalFor,
}
// FIXME (pending discussion of #1697, #2178...): local should really be
// a refinement on pat.
/// Local represents a `let` statement, e.g., `let <pat>:<ty> = <expr>;`
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct Local {
pub ty: P<Ty>,
pub pat: Gc<Pat>,
pub init: Option<Gc<Expr>>,
pub id: NodeId,
pub span: Span,
pub source: LocalSource,
}
pub type Decl = Spanned<Decl_>;
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum Decl_ {
/// A local (let) binding:
DeclLocal(Gc<Local>),
/// An item binding:
DeclItem(Gc<Item>),
}
/// represents one arm of a 'match'
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct Arm {
pub attrs: Vec<Attribute>,
pub pats: Vec<Gc<Pat>>,
pub guard: Option<Gc<Expr>>,
pub body: Gc<Expr>,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct Field {
pub ident: SpannedIdent,
pub expr: Gc<Expr>,
pub span: Span,
}
pub type SpannedIdent = Spanned<Ident>;
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum BlockCheckMode {
DefaultBlock,
UnsafeBlock(UnsafeSource),
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum UnsafeSource {
CompilerGenerated,
UserProvided,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct Expr {
pub id: NodeId,
pub node: Expr_,
pub span: Span,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum Expr_ {
/// First expr is the place; second expr is the value.
ExprBox(Gc<Expr>, Gc<Expr>),
ExprVec(Vec<Gc<Expr>>),
ExprCall(Gc<Expr>, Vec<Gc<Expr>>),
ExprMethodCall(SpannedIdent, Vec<P<Ty>>, Vec<Gc<Expr>>),
ExprTup(Vec<Gc<Expr>>),
ExprBinary(BinOp, Gc<Expr>, Gc<Expr>),
ExprUnary(UnOp, Gc<Expr>),
ExprLit(Gc<Lit>),
ExprCast(Gc<Expr>, P<Ty>),
ExprIf(Gc<Expr>, P<Block>, Option<Gc<Expr>>),
// FIXME #6993: change to Option<Name> ... or not, if these are hygienic.
ExprWhile(Gc<Expr>, P<Block>, Option<Ident>),
// FIXME #6993: change to Option<Name> ... or not, if these are hygienic.
ExprForLoop(Gc<Pat>, Gc<Expr>, P<Block>, Option<Ident>),
// Conditionless loop (can be exited with break, cont, or ret)
// FIXME #6993: change to Option<Name> ... or not, if these are hygienic.
ExprLoop(P<Block>, Option<Ident>),
ExprMatch(Gc<Expr>, Vec<Arm>),
ExprFnBlock(CaptureClause, P<FnDecl>, P<Block>),
ExprProc(P<FnDecl>, P<Block>),
ExprUnboxedFn(CaptureClause, UnboxedClosureKind, P<FnDecl>, P<Block>),
ExprBlock(P<Block>),
ExprAssign(Gc<Expr>, Gc<Expr>),
ExprAssignOp(BinOp, Gc<Expr>, Gc<Expr>),
ExprField(Gc<Expr>, SpannedIdent, Vec<P<Ty>>),
ExprIndex(Gc<Expr>, Gc<Expr>),
/// Variable reference, possibly containing `::` and/or
/// type parameters, e.g. foo::bar::<baz>
ExprPath(Path),
ExprAddrOf(Mutability, Gc<Expr>),
ExprBreak(Option<Ident>),
ExprAgain(Option<Ident>),
ExprRet(Option<Gc<Expr>>),
ExprInlineAsm(InlineAsm),
ExprMac(Mac),
/// A struct literal expression.
ExprStruct(Path, Vec<Field> , Option<Gc<Expr>> /* base */),
/// A vector literal constructed from one repeated element.
ExprRepeat(Gc<Expr> /* element */, Gc<Expr> /* count */),
/// No-op: used solely so we can pretty-print faithfully
ExprParen(Gc<Expr>)
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum CaptureClause {
CaptureByValue,
CaptureByRef,
}
/// When the main rust parser encounters a syntax-extension invocation, it
/// parses the arguments to the invocation as a token-tree. This is a very
/// loose structure, such that all sorts of different AST-fragments can
/// be passed to syntax extensions using a uniform type.
///
/// If the syntax extension is an MBE macro, it will attempt to match its
/// LHS "matchers" against the provided token tree, and if it finds a
/// match, will transcribe the RHS token tree, splicing in any captured
/// macro_parser::matched_nonterminals into the TTNonterminals it finds.
///
/// The RHS of an MBE macro is the only place a TTNonterminal or TTSeq
/// makes any real sense. You could write them elsewhere but nothing
/// else knows what to do with them, so you'll probably get a syntax
/// error.
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
#[doc="For macro invocations; parsing is delegated to the macro"]
pub enum TokenTree {
/// A single token
TTTok(Span, ::parse::token::Token),
/// A delimited sequence (the delimiters appear as the first
/// and last elements of the vector)
// FIXME(eddyb) #6308 Use Rc<[TokenTree]> after DST.
TTDelim(Rc<Vec<TokenTree>>),
// These only make sense for right-hand-sides of MBE macros:
/// A kleene-style repetition sequence with a span, a TTForest,
/// an optional separator, and a boolean where true indicates
/// zero or more (..), and false indicates one or more (+).
// FIXME(eddyb) #6308 Use Rc<[TokenTree]> after DST.
TTSeq(Span, Rc<Vec<TokenTree>>, Option<::parse::token::Token>, bool),
/// A syntactic variable that will be filled in by macro expansion.
TTNonterminal(Span, Ident)
}
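// Informal example: a parenthesized group such as `(a + b)` parses to a
// TTDelim whose vector is roughly
// [TTTok('('), TTTok(a), TTTok(+), TTTok(b), TTTok(')')] -- the delimiters
// really are kept as the first and last elements, as noted above.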
// Matchers are nodes defined-by and recognized-by the main rust parser and
// language, but they're only ever found inside syntax-extension invocations;
// indeed, the only thing that ever _activates_ the rules in the rust parser
// for parsing a matcher is a matcher looking for the 'matchers' nonterminal
// itself. Matchers represent a small sub-language for pattern-matching
// token-trees, and are thus primarily used by the macro-defining extension
// itself.
//
// MatchTok
// --------
//
// A matcher that matches a single token, denoted by the token itself. So
// long as there's no $ involved.
//
//
// MatchSeq
// --------
//
// A matcher that matches a sequence of sub-matchers, denoted various
// possible ways:
//
// $(M)* zero or more Ms
// $(M)+ one or more Ms
// $(M),+ one or more comma-separated Ms
// $(A B C);* zero or more semi-separated 'A B C' seqs
//
//
// MatchNonterminal
// -----------------
//
// A matcher that matches one of a few interesting named rust
// nonterminals, such as types, expressions, items, or raw token-trees. A
// black-box matcher on expr, for example, binds an expr to a given ident,
// and that ident can re-occur as an interpolation in the RHS of a
// macro-by-example rule. For example:
//
// $foo:expr => 1 + $foo // interpolate an expr
// $foo:tt => $foo // interpolate a token-tree
// $foo:tt => bar! $foo // only other valid interpolation
// // is in arg position for another
// // macro
//
// As a final, horrifying aside, note that macro-by-example's input is
// also matched by one of these matchers. Holy self-referential! It is matched
// by a MatchSeq, specifically this one:
//
// $( $lhs:matchers => $rhs:tt );+
//
// If you understand that, you have closed the loop and understand the whole
// macro system. Congratulations.
pub type Matcher = Spanned<Matcher_>;
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum Matcher_ {
/// Match one token
MatchTok(::parse::token::Token),
/// Match repetitions of a sequence: body, separator, zero ok?,
/// lo, hi position-in-match-array used:
MatchSeq(Vec<Matcher> , Option<::parse::token::Token>, bool, uint, uint),
/// Parse a Rust NT: name to bind, name of NT, position in match array:
MatchNonterminal(Ident, Ident, uint)
}
pub type Mac = Spanned<Mac_>;
/// Represents a macro invocation. The Path indicates which macro
/// is being invoked, and the vector of token-trees contains the source
/// of the macro invocation.
/// There's only one flavor, now, so this could presumably be simplified.
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum Mac_ {
// NB: the additional ident for a macro_rules-style macro is actually
// stored in the enclosing item. Oog.
MacInvocTT(Path, Vec<TokenTree> , SyntaxContext), // new macro-invocation
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum StrStyle {
CookedStr,
RawStr(uint)
}
pub type Lit = Spanned<Lit_>;
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum Sign {
Minus,
Plus
}
impl<T: PartialOrd+Zero> Sign {
pub fn new(n: T) -> Sign {
if n < Zero::zero() {
Minus
} else {
Plus
}
}
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum LitIntType {
SignedIntLit(IntTy, Sign),
UnsignedIntLit(UintTy),
UnsuffixedIntLit(Sign)
}
impl LitIntType {
pub fn suffix_len(&self) -> uint {
match *self {
UnsuffixedIntLit(_) => 0,
SignedIntLit(s, _) => s.suffix_len(),
UnsignedIntLit(u) => u.suffix_len()
}
}
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum Lit_ {
LitStr(InternedString, StrStyle),
LitBinary(Rc<Vec<u8> >),
LitByte(u8),
LitChar(char),
LitInt(u64, LitIntType),
LitFloat(InternedString, FloatTy),
LitFloatUnsuffixed(InternedString),
LitNil,
LitBool(bool),
}
// NB: If you change this, you'll probably want to change the corresponding
// type structure in middle/ty.rs as well.
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct MutTy {
pub ty: P<Ty>,
pub mutbl: Mutability,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct TypeField {
pub ident: Ident,
pub mt: MutTy,
pub span: Span,
}
/// Represents a required method in a trait declaration,
/// one without a default implementation
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct TypeMethod {
pub ident: Ident,
pub attrs: Vec<Attribute>,
pub fn_style: FnStyle,
pub abi: Abi,
pub decl: P<FnDecl>,
pub generics: Generics,
pub explicit_self: ExplicitSelf,
pub id: NodeId,
pub span: Span,
pub vis: Visibility,
}
/// Represents a method declaration in a trait declaration, possibly including
/// a default implementation. A trait method is either required (meaning it
/// doesn't have an implementation, just a signature) or provided (meaning it
/// has a default implementation).
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum TraitItem {
RequiredMethod(TypeMethod),
ProvidedMethod(Gc<Method>),
}
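// Sketch of how a trait body maps onto `TraitItem` (the trait is invented
// for illustration):
//
//     trait Greet {
//         fn name(&self) -> String;           // RequiredMethod(TypeMethod)
//         fn greet(&self) { /* default */ }   // ProvidedMethod(Gc<Method>)
//     }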
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum ImplItem {
MethodImplItem(Gc<Method>),
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum IntTy {
TyI,
TyI8,
TyI16,
TyI32,
TyI64,
}
impl fmt::Show for IntTy {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", ast_util::int_ty_to_string(*self, None))
}
}
impl IntTy {
pub fn suffix_len(&self) -> uint {
match *self {
TyI => 1,
TyI8 => 2,
TyI16 | TyI32 | TyI64 => 3,
}
}
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum UintTy {
TyU,
TyU8,
TyU16,
TyU32,
TyU64,
}
impl UintTy {
pub fn suffix_len(&self) -> uint {
match *self {
TyU => 1,
TyU8 => 2,
TyU16 | TyU32 | TyU64 => 3,
}
}
}
impl fmt::Show for UintTy {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", ast_util::uint_ty_to_string(*self, None))
}
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum FloatTy {
TyF32,
TyF64,
}
impl fmt::Show for FloatTy {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", ast_util::float_ty_to_string(*self))
}
}
impl FloatTy {
pub fn suffix_len(&self) -> uint {
match *self {
TyF32 | TyF64 => 3, // add F128 handling here
}
}
}
// NB PartialEq method appears below.
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct Ty {
pub id: NodeId,
pub node: Ty_,
pub span: Span,
}
/// Not represented directly in the AST, referred to by name through a ty_path.
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum PrimTy {
TyInt(IntTy),
TyUint(UintTy),
TyFloat(FloatTy),
TyStr,
TyBool,
TyChar
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum Onceness {
Once,
Many
}
impl fmt::Show for Onceness {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Once => "once".fmt(f),
Many => "many".fmt(f),
}
}
}
/// Represents the type of a closure
#[deriving(PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct ClosureTy {
pub lifetimes: Vec<LifetimeDef>,
pub fn_style: FnStyle,
pub onceness: Onceness,
pub decl: P<FnDecl>,
pub bounds: TyParamBounds,
}<|fim▁hole|> pub fn_style: FnStyle,
pub abi: Abi,
pub lifetimes: Vec<LifetimeDef>,
pub decl: P<FnDecl>
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct UnboxedFnTy {
pub kind: UnboxedClosureKind,
pub decl: P<FnDecl>,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum Ty_ {
TyNil,
TyBot, /* bottom type */
TyBox(P<Ty>),
TyUniq(P<Ty>),
TyVec(P<Ty>),
TyFixedLengthVec(P<Ty>, Gc<Expr>),
TyPtr(MutTy),
TyRptr(Option<Lifetime>, MutTy),
TyClosure(Gc<ClosureTy>),
TyProc(Gc<ClosureTy>),
TyBareFn(Gc<BareFnTy>),
TyUnboxedFn(Gc<UnboxedFnTy>),
TyTup(Vec<P<Ty>> ),
TyPath(Path, Option<TyParamBounds>, NodeId), // for #7264; see above
/// No-op; kept solely so that we can pretty-print faithfully
TyParen(P<Ty>),
TyTypeof(Gc<Expr>),
/// TyInfer means the type should be inferred instead of it having been
/// specified. This can appear anywhere in a type.
TyInfer,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum AsmDialect {
AsmAtt,
AsmIntel
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct InlineAsm {
pub asm: InternedString,
pub asm_str_style: StrStyle,
pub outputs: Vec<(InternedString, Gc<Expr>, bool)>,
pub inputs: Vec<(InternedString, Gc<Expr>)>,
pub clobbers: InternedString,
pub volatile: bool,
pub alignstack: bool,
pub dialect: AsmDialect
}
/// represents an argument in a function header
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct Arg {
pub ty: P<Ty>,
pub pat: Gc<Pat>,
pub id: NodeId,
}
impl Arg {
pub fn new_self(span: Span, mutability: Mutability, self_ident: Ident) -> Arg {
let path = Spanned{span:span,node:self_ident};
Arg {
// HACK(eddyb) fake type for the self argument.
ty: P(Ty {
id: DUMMY_NODE_ID,
node: TyInfer,
span: DUMMY_SP,
}),
pat: box(GC) Pat {
id: DUMMY_NODE_ID,
node: PatIdent(BindByValue(mutability), path, None),
span: span
},
id: DUMMY_NODE_ID
}
}
}
/// represents the header (not the body) of a function declaration
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct FnDecl {
pub inputs: Vec<Arg>,
pub output: P<Ty>,
pub cf: RetStyle,
pub variadic: bool
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum FnStyle {
/// Declared with "unsafe fn"
UnsafeFn,
/// Declared with "fn"
NormalFn,
}
impl fmt::Show for FnStyle {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
NormalFn => "normal".fmt(f),
UnsafeFn => "unsafe".fmt(f),
}
}
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum RetStyle {
/// Functions with return type ! that always
/// raise an error or exit (i.e. never return to the caller)
NoReturn,
/// Everything else
Return,
}
/// Represents the kind of 'self' associated with a method
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum ExplicitSelf_ {
/// No self
SelfStatic,
/// `self`
SelfValue(Ident),
/// `&'lt self`, `&'lt mut self`
SelfRegion(Option<Lifetime>, Mutability, Ident),
/// `self: TYPE`
SelfExplicit(P<Ty>, Ident),
}
pub type ExplicitSelf = Spanned<ExplicitSelf_>;
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct Method {
pub attrs: Vec<Attribute>,
pub id: NodeId,
pub span: Span,
pub node: Method_,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum Method_ {
/// Represents a method declaration
MethDecl(Ident,
Generics,
Abi,
ExplicitSelf,
FnStyle,
P<FnDecl>,
P<Block>,
Visibility),
/// Represents a macro in method position
MethMac(Mac),
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct Mod {
    /// A span from the first token past `{` to the last token before the `}`.
/// For `mod foo;`, the inner span ranges from the first token
/// to the last token in the external file.
pub inner: Span,
pub view_items: Vec<ViewItem>,
pub items: Vec<Gc<Item>>,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct ForeignMod {
pub abi: Abi,
pub view_items: Vec<ViewItem>,
pub items: Vec<Gc<ForeignItem>>,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct VariantArg {
pub ty: P<Ty>,
pub id: NodeId,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum VariantKind {
TupleVariantKind(Vec<VariantArg>),
StructVariantKind(Gc<StructDef>),
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct EnumDef {
pub variants: Vec<P<Variant>>,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct Variant_ {
pub name: Ident,
pub attrs: Vec<Attribute>,
pub kind: VariantKind,
pub id: NodeId,
pub disr_expr: Option<Gc<Expr>>,
pub vis: Visibility,
}
pub type Variant = Spanned<Variant_>;
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum PathListItem_ {
PathListIdent { pub name: Ident, pub id: NodeId },
PathListMod { pub id: NodeId }
}
impl PathListItem_ {
pub fn id(&self) -> NodeId {
match *self {
PathListIdent { id, .. } | PathListMod { id } => id
}
}
}
pub type PathListItem = Spanned<PathListItem_>;
pub type ViewPath = Spanned<ViewPath_>;
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum ViewPath_ {
/// `foo::bar::baz as quux`
///
/// or just
///
/// `foo::bar::baz` (with `as baz` implicitly on the right)
ViewPathSimple(Ident, Path, NodeId),
/// `foo::bar::*`
ViewPathGlob(Path, NodeId),
/// `foo::bar::{a,b,c}`
ViewPathList(Path, Vec<PathListItem> , NodeId)
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct ViewItem {
pub node: ViewItem_,
pub attrs: Vec<Attribute>,
pub vis: Visibility,
pub span: Span,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum ViewItem_ {
/// Ident: name used to refer to this crate in the code
/// optional (InternedString,StrStyle): if present, this is a location
/// (containing arbitrary characters) from which to fetch the crate sources
/// For example, extern crate whatever = "github.com/rust-lang/rust"
ViewItemExternCrate(Ident, Option<(InternedString,StrStyle)>, NodeId),
ViewItemUse(Gc<ViewPath>),
}
/// Meta-data associated with an item
pub type Attribute = Spanned<Attribute_>;
/// Distinguishes between Attributes that decorate items and Attributes that
/// are contained as statements within items. These two cases need to be
/// distinguished for pretty-printing.
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum AttrStyle {
AttrOuter,
AttrInner,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct AttrId(pub uint);
/// Doc-comments are promoted to attributes that have is_sugared_doc = true
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct Attribute_ {
pub id: AttrId,
pub style: AttrStyle,
pub value: Gc<MetaItem>,
pub is_sugared_doc: bool,
}
/// TraitRef's appear in impls.
/// resolve maps each TraitRef's ref_id to its defining trait; that's all
/// that the ref_id is for. The impl_id maps to the "self type" of this impl.
/// If this impl is an ItemImpl, the impl_id is redundant (it could be the
/// same as the impl's node id).
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct TraitRef {
pub path: Path,
pub ref_id: NodeId,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum Visibility {
Public,
Inherited,
}
impl Visibility {
pub fn inherit_from(&self, parent_visibility: Visibility) -> Visibility {
match self {
&Inherited => parent_visibility,
&Public => *self
}
}
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct StructField_ {
pub kind: StructFieldKind,
pub id: NodeId,
pub ty: P<Ty>,
pub attrs: Vec<Attribute>,
}
impl StructField_ {
pub fn ident(&self) -> Option<Ident> {
match self.kind {
NamedField(ref ident, _) => Some(ident.clone()),
UnnamedField(_) => None
}
}
}
pub type StructField = Spanned<StructField_>;
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum StructFieldKind {
NamedField(Ident, Visibility),
/// Element of a tuple-like struct
UnnamedField(Visibility),
}
impl StructFieldKind {
pub fn is_unnamed(&self) -> bool {
match *self {
UnnamedField(..) => true,
NamedField(..) => false,
}
}
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct StructDef {
/// Fields, not including ctor
pub fields: Vec<StructField>,
/// ID of the constructor. This is only used for tuple- or enum-like
/// structs.
pub ctor_id: Option<NodeId>,
/// Super struct, if specified.
pub super_struct: Option<P<Ty>>,
/// True iff the struct may be inherited from.
pub is_virtual: bool,
}
/*
FIXME (#3300): Should allow items to be anonymous. Right now
we just use dummy names for anon items.
*/
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct Item {
pub ident: Ident,
pub attrs: Vec<Attribute>,
pub id: NodeId,
pub node: Item_,
pub vis: Visibility,
pub span: Span,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum Item_ {
ItemStatic(P<Ty>, Mutability, Gc<Expr>),
ItemFn(P<FnDecl>, FnStyle, Abi, Generics, P<Block>),
ItemMod(Mod),
ItemForeignMod(ForeignMod),
ItemTy(P<Ty>, Generics),
ItemEnum(EnumDef, Generics),
ItemStruct(Gc<StructDef>, Generics),
/// Represents a Trait Declaration
ItemTrait(Generics,
Option<TyParamBound>, // (optional) default bound not required for Self.
// Currently, only Sized makes sense here.
TyParamBounds,
Vec<TraitItem>),
ItemImpl(Generics,
Option<TraitRef>, // (optional) trait this impl implements
P<Ty>, // self
Vec<ImplItem>),
/// A macro invocation (which includes macro definition)
ItemMac(Mac),
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct ForeignItem {
pub ident: Ident,
pub attrs: Vec<Attribute>,
pub node: ForeignItem_,
pub id: NodeId,
pub span: Span,
pub vis: Visibility,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum ForeignItem_ {
ForeignItemFn(P<FnDecl>, Generics),
ForeignItemStatic(P<Ty>, /* is_mutbl */ bool),
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum UnboxedClosureKind {
FnUnboxedClosureKind,
FnMutUnboxedClosureKind,
FnOnceUnboxedClosureKind,
}
/// The data we save and restore about an inlined item or method. This is not
/// part of the AST that we parse from a file, but it becomes part of the tree
/// that we trans.
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum InlinedItem {
IIItem(Gc<Item>),
IITraitItem(DefId /* impl id */, InlinedTraitItem),
IIForeign(Gc<ForeignItem>),
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum InlinedTraitItem {
ProvidedInlinedTraitItem(Gc<Method>),
RequiredInlinedTraitItem(Gc<Method>),
}
#[cfg(test)]
mod test {
use serialize::json;
use serialize;
use codemap::*;
use super::*;
// are ASTs encodable?
#[test]
fn check_asts_encodable() {
use std::io;
let e = Crate {
module: Mod {
inner: Span {
lo: BytePos(11),
hi: BytePos(19),
expn_info: None,
},
view_items: Vec::new(),
items: Vec::new(),
},
attrs: Vec::new(),
config: Vec::new(),
span: Span {
lo: BytePos(10),
hi: BytePos(20),
expn_info: None,
},
exported_macros: Vec::new(),
};
// doesn't matter which encoder we use....
let _f = &e as &serialize::Encodable<json::Encoder, io::IoError>;
}
}<|fim▁end|> |
#[deriving(PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct BareFnTy { |
<|file_name|>instrumenttype.py<|end_file_name|><|fim▁begin|>from openerp import fields, models,osv<|fim▁hole|>
from base_olims_model import BaseOLiMSModel
from openerp.tools.translate import _
from fields.string_field import StringField
from fields.text_field import TextField
from fields.widget.widget import TextAreaWidget
schema = (StringField('Title',
required=1,
),
TextField('Description',
widget=TextAreaWidget(
label=_('Description'),
description=_('Used in item listings and search results.')),
),
fields.One2many('olims.instrument',
'Type',
string='Type')
)
class InstrumentType(models.Model, BaseOLiMSModel):#(BaseContent):
_name = 'olims.instrument_type'
_rec_name = 'Title'
InstrumentType.initialze(schema)<|fim▁end|> | |
<|file_name|>compress_nc.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from __future__ import print_function<|fim▁hole|>import multiprocessing as mp
import subprocess as sp
import tempfile
import shlex
import shutil
"""
Compress all netCDF files under one or more directories, e.g.:
compress_nc.py ~/exps/access/cm_1440x1080-1/archive/ ./
This will recursively search under that directory and compress every single netcdf file.
"""
def compress_netcdf_file(filename, compression_level=7):
"""
Use nccopy to compress a netcdf file.
"""
_, tmp = tempfile.mkstemp()
cmd = 'nccopy -d {} {} {}'.format(compression_level, filename, tmp)
print(cmd)
ret = sp.call(shlex.split(cmd))
assert(ret == 0)
# Put a file lock on 'filename'?
shutil.move(tmp, filename)
def find_netcdf_files(path):
"""
Return full path of all netcdf files under 'path'
"""
netcdf_files = []
for root, dirs, files in os.walk(path):
for file in files:
if file.endswith(".nc"):
full_path = os.path.join(os.path.abspath(root), file)
if not os.path.islink(full_path):
netcdf_files.append(full_path)
return netcdf_files
def main():
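    # Gather every netCDF file under the given directories and compress them in parallel.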
parser = argparse.ArgumentParser()
parser.add_argument("dirs", nargs='+',
help="A list of directories to search for netcdf files.")
args = parser.parse_args()
all_netcdf_files = []
for d in args.dirs:
all_netcdf_files.extend(find_netcdf_files(d))
# Make sure there are no duplicates.
all_netcdf_files = list(set(all_netcdf_files))
pool = mp.Pool()
results = pool.map(compress_netcdf_file, all_netcdf_files)
pool.close()
pool.join()
if __name__ == "__main__":
sys.exit(main())<|fim▁end|> |
import argparse
import sys
import os |
<|file_name|>security-group-page.component.ts<|end_file_name|><|fim▁begin|>import { Component, Input } from '@angular/core';
import { ActivatedRoute, Router } from '@angular/router';
import { SecurityGroup } from '../sg.model';
import { ViewMode } from '../../shared/components/view-mode-switch/view-mode-switch.component';
import { ListService } from '../../shared/components/list/list.service';
import { SecurityGroupViewMode } from '../sg-view-mode';
import { VirtualMachine } from '../../vm';
import { NgrxEntities } from '../../shared/interfaces';
@Component({
selector: 'cs-security-group-page',
templateUrl: 'security-group-page.component.html',
styleUrls: ['security-group-page.component.scss'],
providers: [ListService],
})
export class SecurityGroupPageComponent {
@Input()
public securityGroups: SecurityGroup[];
@Input()
public isLoading = false;
@Input()
public viewMode: SecurityGroupViewMode;
@Input()
public query: string;
@Input()
public vmList: NgrxEntities<VirtualMachine>;
public mode: ViewMode;
public viewModeKey = 'sgPageViewMode';
public get isCreationEnabled(): boolean {
return this.viewMode !== SecurityGroupViewMode.Private;
}
constructor(
private router: Router,<|fim▁hole|> public get showSidebarDetails(): boolean {
return this.activatedRoute.snapshot.firstChild.firstChild.routeConfig.path === 'details';
}
public changeMode(mode) {
this.mode = mode;
}
public showCreationDialog(): void {
this.router.navigate(['./create'], {
queryParamsHandling: 'preserve',
relativeTo: this.activatedRoute,
});
}
}<|fim▁end|> | private activatedRoute: ActivatedRoute,
public listService: ListService,
) {}
|
<|file_name|>run.go<|end_file_name|><|fim▁begin|>package main
import (
"net/url"
"time"
"github.com/codegangsta/cli"
"github.com/michaeltrobinson/cadvisor-integration/scraper"
"github.com/signalfx/metricproxy/protocol/signalfx"
log "github.com/Sirupsen/logrus"
)
var (
sfxAPIToken string
sfxIngestURL string
clusterName string
sendInterval time.Duration
cadvisorPort int
discoveryInterval time.Duration
maxDatapoints int
kubeUser string
kubePass string
)
func init() {
app.Commands = append(app.Commands, cli.Command{
Name: "run",
Usage: "start the service (the default)",<|fim▁hole|> Name: "sfx-ingest-url",
EnvVar: "SFX_ENDPOINT",
Value: "https://ingest.signalfx.com",
Usage: "SignalFx ingest URL",
},
cli.StringFlag{
Name: "sfx-api-token",
EnvVar: "SFX_API_TOKEN",
Usage: "SignalFx API token",
},
cli.StringFlag{
Name: "cluster-name",
EnvVar: "CLUSTER_NAME",
Usage: "Cluster name will appear as dimension",
},
cli.DurationFlag{
Name: "send-interval",
EnvVar: "SEND_INTERVAL",
Value: time.Second * 30,
Usage: "Rate at which data is queried from cAdvisor and send to SignalFx",
},
cli.IntFlag{
Name: "cadvisor-port",
EnvVar: "CADVISOR_PORT",
Value: 4194,
Usage: "Port on which cAdvisor listens",
},
cli.DurationFlag{
Name: "discovery-interval",
EnvVar: "NODE_SERVICE_DISCOVERY_INTERVAL",
Value: time.Minute * 5,
Usage: "Rate at which nodes and services will be rediscovered",
},
cli.StringFlag{
Name: "kube-user",
EnvVar: "KUBE_USER",
Usage: "Username to authenticate to kubernetes api",
},
cli.StringFlag{
Name: "kube-pass",
EnvVar: "KUBE_PASS",
Usage: "Password to authenticate to kubernetes api",
},
cli.IntFlag{
Name: "max-datapoints",
EnvVar: "MAX_DATAPOINTS",
Value: 50,
Usage: "How many datapoints to batch before forwarding to SignalFX",
},
},
})
}
func setupRun(c *cli.Context) error {
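	// Validate the required CLI flags and cache their values before the run command starts.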
sfxAPIToken = c.String("sfx-api-token")
if sfxAPIToken == "" {
cli.ShowAppHelp(c)
log.Fatal("API token is required")
}
clusterName = c.String("cluster-name")
if clusterName == "" {
cli.ShowAppHelp(c)
log.Fatal("cluster name is required")
}
sfxIngestURL = c.String("sfx-ingest-url")
sendInterval = c.Duration("send-interval")
cadvisorPort = c.Int("cadvisor-port")
discoveryInterval = c.Duration("discovery-interval")
kubeUser = c.String("kube-user")
kubePass = c.String("kube-pass")
if kubeUser == "" || kubePass == "" {
cli.ShowAppHelp(c)
log.Fatal("kubernetes credentials are required")
}
maxDatapoints = c.Int("max-datapoints")
return nil
}
func run(c *cli.Context) {
s := scraper.New(
newSfxClient(sfxIngestURL, sfxAPIToken),
scraper.Config{
ClusterName: clusterName,
CadvisorPort: cadvisorPort,
KubeUser: kubeUser,
KubePass: kubePass,
MaxDatapoints: maxDatapoints,
})
if err := s.Run(sendInterval, discoveryInterval); err != nil {
log.WithError(err).Fatal("failure")
}
}
func newSfxClient(ingestURL, authToken string) *signalfx.Forwarder {
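	// Build a SignalFx JSON forwarder pointed at the given ingest endpoint, authenticated with the API token.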
sfxEndpoint, err := url.Parse(ingestURL)
if err != nil {
panic("failed to parse SFX ingest URL")
}
return signalfx.NewSignalfxJSONForwarder(sfxEndpoint.String(), time.Second*10, authToken, 10, "", "", "")
}<|fim▁end|> | Action: run,
Before: setupRun,
Flags: []cli.Flag{
cli.StringFlag{ |
<|file_name|>dump-stats.py<|end_file_name|><|fim▁begin|>from __future__ import print_function
import time, json
# Run this as 'watch python misc/dump-stats.py' against a 'wormhole-server
# start --stats-file=stats.json'
with open("stats.json") as f:
data_s = f.read()
now = time.time()
data = json.loads(data_s)
if now < data["valid_until"]:
valid = "valid"
else:<|fim▁hole|> valid = "EXPIRED"
age = now - data["created"]
print("age: %d (%s)" % (age, valid))
print(data_s)<|fim▁end|> | |
<|file_name|>errorcodes.js<|end_file_name|><|fim▁begin|>module.exports = {
errorcodes: {
NoError: 0,
GeneralError: 1,
InvalidGame: 2,
Timeout: 3,
InvalidRequest: 4,
DoNotTrack: 5,
GeoIPDisabled: 100,
LeaderboardsDisabled: 200,
InvalidName: 201,
InvalidAuthKey: 202,
NoFacebookId: 203,
NoTableName: 204,
InvalidPermalink: 205,
NoLeaderboardId: 206,
InvalidLeaderboardId: 207,
PlayerBanned: 208,
NotBestScore: 209,
GameVarsDisabled: 300,
PlayerLevelsDisabled: 400,
InvalidRating: 401,
AlreadyRated: 402,
NoLevelName: 403,
NoLevelId: 404,
LevelAlreadyExists: 405,
AchievementsDisabled: 500,
NoPlayerId: 501,
NoPlayerName: 502,
NoAchievement: 503,
InvalidAchievement: 504,
AlreadyHadAchievementNotSaved: 505,
AlreadyHadAchievementSaved: 506,
NewsletterDisabled: 600,
MailChimpNotConfigured: 601,
MailChimpError: 602
},
descriptions: {
// General Errors
"0": "No error",
"1": "General error, this typically means the player is unable to connect",
"2": "Invalid game credentials. Make sure you use the keys you set up in your database",
"3": "Request timed out",
"4": "Invalid request",
// GeoIP Errors
"100": "GeoIP API has been disabled for this game",
// Leaderboard Errors
"200": "Leaderboard API has been disabled for this game",
"201": "The player's name was not provided when saving a score",
"203": "Player is banned from submitting scores in this game",
"204": "Score was not saved because it was not the player's best. You can allow players to have more than one score by specifying allowduplicates=true in your save options",
// GameVars Errors
"300": "GameVars API has been disabled for this game",
// LevelSharing Errors
"400": "Level sharing API has been disabled for this game",
"401": "Invalid rating value (must be 1 - 10)",
"402": "Player has already rated that level",
"403": "Missing level name",
"404": "Missing levelid",<|fim▁hole|> // Achievement errors
"500": "Achievements API has been disabled for this game",
"501": "Missing playerid",
"502": "Missing player name",
"503": "Missing achievement",
"504": "Invalid achievement for achievement key",
"505": "Player already had the achievement. You can overwrite old achievements with overwrite=true or save each time the player is awarded with allowduplicates=true",
"506": "Player already had the achievement and it was overwritten or a duplicate was saved successfully",
// Newsletter errors
"600": "Newsletter API has been disabled for this game",
"601": "MailChimp API key is not configured",
"602": "The MailChimp API returned an error"
}
};<|fim▁end|> | "405": "Level already exists",
|
<|file_name|>jsonobject.cpp<|end_file_name|><|fim▁begin|>#include "jsonobject.h"
#include "jsonparser.h"
namespace amgcommon {
namespace json {
JsonObject::JsonObject(string rawJson) {
this->originalJson = rawJson;
this->root = JsonNode();
}
JsonObject::JsonObject(const JsonObject &a) {
this->originalJson = a.originalJson;
this->root = a.root;
}
JsonObject *JsonObject::clone() {
return new JsonObject(*this);
}
JsonObject::~JsonObject() {
}
string JsonObject::toString() {
<|fim▁hole|> return root;
}
void JsonObject::setKey(string path, Object *value) {
this->root.setKey(path, value);
}
Object *JsonObject::getKey(string path) {
return this->root.getKey(path);
}
}
}<|fim▁end|> | return this->root.toString();
}
JsonNode JsonObject::getRoot() {
|
<|file_name|>util.rs<|end_file_name|><|fim▁begin|>// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unknown_lints)]
// client and server share different parts of utils.
#![allow(dead_code)]
#![allow(cast_lossless)]
use std::f64::consts::PI;
use serde_json;
use grpcio_proto::example::route_guide::*;
#[derive(Serialize, Deserialize, Debug)]
struct PointRef {
latitude: i32,
longitude: i32,
}
#[derive(Serialize, Deserialize, Debug)]
struct FeatureRef {
location: PointRef,
name: String,
}
impl From<FeatureRef> for Feature {
fn from(r: FeatureRef) -> Feature {
let mut f = Feature::default();
f.set_name(r.name);
f.mut_location().set_latitude(r.location.latitude);
f.mut_location().set_longitude(r.location.longitude);
f<|fim▁hole|>}
pub fn load_db() -> Vec<Feature> {
let data = include_str!("db.json");
let features: Vec<FeatureRef> = serde_json::from_str(data).unwrap();
features.into_iter().map(From::from).collect()
}
pub fn same_point(lhs: &Point, rhs: &Point) -> bool {
lhs.get_longitude() == rhs.get_longitude() && lhs.get_latitude() == rhs.get_latitude()
}
pub fn fit_in(lhs: &Point, rhs: &Rectangle) -> bool {
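    // The point fits when it lies within the rectangle's lo/hi corners (inclusive).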
let hi = rhs.get_hi();
let lo = rhs.get_lo();
lhs.get_longitude() <= hi.get_longitude()
&& lhs.get_longitude() >= lo.get_longitude()
&& lhs.get_latitude() <= hi.get_latitude()
&& lhs.get_latitude() >= lo.get_latitude()
}
const COORD_FACTOR: f64 = 10000000.0;
pub fn convert_to_rad(num: f64) -> f64 {
num * PI / 180.0
}
pub fn format_point(p: &Point) -> String {
format!(
"{}, {}",
p.get_latitude() as f64 / COORD_FACTOR,
p.get_longitude() as f64 / COORD_FACTOR
)
}
pub fn cal_distance(lhs: &Point, rhs: &Point) -> f64 {
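    // Great-circle distance between the two points, computed with the haversine formula (result in metres).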
let lat1 = lhs.get_latitude() as f64 / COORD_FACTOR;
let lon1 = lhs.get_longitude() as f64 / COORD_FACTOR;
let lat2 = rhs.get_latitude() as f64 / COORD_FACTOR;
let lon2 = rhs.get_longitude() as f64 / COORD_FACTOR;
let lat_rad_1 = convert_to_rad(lat1);
let lat_rad_2 = convert_to_rad(lat2);
let delta_lat_rad = convert_to_rad(lat2 - lat1);
let delta_lon_rad = convert_to_rad(lon2 - lon1);
let a = (delta_lat_rad / 2.0).sin().powi(2)
+ lat_rad_1.cos() * lat_rad_2.cos() * (delta_lon_rad / 2.0).sin().powi(2);
let c = 2.0 * a.sqrt().atan2((1.0 - a).sqrt());
let r = 6371000.0; // metres
r * c
}<|fim▁end|> | } |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Servo, the mighty web browser engine from the future.
//!
//! This is a very simple library that wires all of Servo's components
//! together as type `Browser`, along with a generic client
//! implementing the `WindowMethods` trait, to create a working web
//! browser.
//!
//! The `Browser` type is responsible for configuring a
//! `Constellation`, which does the heavy lifting of coordinating all
//! of Servo's internal subsystems, including the `ScriptThread` and the
//! `LayoutThread`, as well maintains the navigation context.
//!
//! The `Browser` is fed events from a generic type that implements the
//! `WindowMethods` trait.
#[cfg(not(target_os = "windows"))]
extern crate gaol;
#[macro_use]
extern crate gleam;
pub extern crate canvas;
pub extern crate canvas_traits;
pub extern crate compositing;
pub extern crate constellation;
pub extern crate devtools;
pub extern crate devtools_traits;
pub extern crate euclid;
pub extern crate gfx;
pub extern crate ipc_channel;
pub extern crate layout_thread;
pub extern crate msg;
pub extern crate net;
pub extern crate net_traits;
pub extern crate profile;
pub extern crate profile_traits;
pub extern crate script;
pub extern crate script_traits;
pub extern crate script_layout_interface;
pub extern crate style;
pub extern crate url;
pub extern crate util;
#[cfg(feature = "webdriver")]
extern crate webdriver_server;
extern crate webrender;
extern crate webrender_traits;
#[cfg(feature = "webdriver")]
fn webdriver(port: u16, constellation: Sender<ConstellationMsg>) {
webdriver_server::start_server(port, constellation);
}
#[cfg(not(feature = "webdriver"))]
fn webdriver(_port: u16, _constellation: Sender<ConstellationMsg>) { }
use compositing::compositor_thread::InitialCompositorState;
use compositing::windowing::WindowEvent;
use compositing::windowing::WindowMethods;
use compositing::{CompositorProxy, IOCompositor};
#[cfg(not(target_os = "windows"))]
use constellation::content_process_sandbox_profile;
use constellation::{Constellation, InitialConstellationState, UnprivilegedPipelineContent};
#[cfg(not(target_os = "windows"))]
use gaol::sandbox::{ChildSandbox, ChildSandboxMethods};
use gfx::font_cache_thread::FontCacheThread;
use ipc_channel::ipc::{self, IpcSender};
use net::bluetooth_thread::BluetoothThreadFactory;
use net::image_cache_thread::new_image_cache_thread;
use net::resource_thread::new_resource_threads;
use net_traits::IpcSend;
use net_traits::bluetooth_thread::BluetoothMethodMsg;
use profile::mem as profile_mem;
use profile::time as profile_time;
use profile_traits::mem;
use profile_traits::time;
use script_traits::ConstellationMsg;
use std::rc::Rc;
use std::sync::mpsc::Sender;
use util::resource_files::resources_dir_path;
use util::{opts, prefs};
pub use gleam::gl;
/// The in-process interface to Servo.
///
/// It does everything necessary to render the web, primarily
/// orchestrating the interaction between JavaScript, CSS layout,
/// rendering, and the client window.
///
/// Clients create a `Browser` for a given reference-counted type
/// implementing `WindowMethods`, which is the bridge to whatever
/// application Servo is embedded in. Clients then create an event
/// loop to pump messages between the embedding application and
/// various browser components.
pub struct Browser<Window: WindowMethods + 'static> {
compositor: IOCompositor<Window>,
}
impl<Window> Browser<Window> where Window: WindowMethods + 'static {
pub fn new(window: Rc<Window>) -> Browser<Window> {
// Global configuration options, parsed from the command line.
let opts = opts::get();
script::init();
// Get both endpoints of a special channel for communication between
// the client window and the compositor. This channel is unique because
// messages to client may need to pump a platform-specific event loop
// to deliver the message.
let (compositor_proxy, compositor_receiver) =
window.create_compositor_channel();
let supports_clipboard = window.supports_clipboard();
let time_profiler_chan = profile_time::Profiler::create(&opts.time_profiling,
opts.time_profiler_trace_path.clone());
let mem_profiler_chan = profile_mem::Profiler::create(opts.mem_profiler_period);
let devtools_chan = opts.devtools_port.map(|port| {
devtools::start_server(port)
});
let (webrender, webrender_api_sender) = if opts::get().use_webrender {
let mut resource_path = resources_dir_path();
resource_path.push("shaders");
// TODO(gw): Duplicates device_pixels_per_screen_px from compositor. Tidy up!
let scale_factor = window.scale_factor().get();
let device_pixel_ratio = match opts.device_pixels_per_px {
Some(device_pixels_per_px) => device_pixels_per_px,
None => match opts.output_file {
Some(_) => 1.0,
None => scale_factor,
}
};
let (webrender, webrender_sender) =
webrender::Renderer::new(webrender::RendererOptions {
device_pixel_ratio: device_pixel_ratio,
resource_path: resource_path,
enable_aa: opts.enable_text_antialiasing,
enable_msaa: opts.use_msaa,
enable_profiler: opts.webrender_stats,
});
(Some(webrender), Some(webrender_sender))
} else {
(None, None)
};
// Create the constellation, which maintains the engine
// pipelines, including the script and layout threads, as well
// as the navigation context.
let constellation_chan = create_constellation(opts.clone(),
compositor_proxy.clone_compositor_proxy(),
time_profiler_chan.clone(),
mem_profiler_chan.clone(),
devtools_chan,
supports_clipboard,
webrender_api_sender.clone());
if cfg!(feature = "webdriver") {
if let Some(port) = opts.webdriver_port {
webdriver(port, constellation_chan.clone());
}
}
// The compositor coordinates with the client window to create the final
// rendered page and display it somewhere.
let compositor = IOCompositor::create(window, InitialCompositorState {
sender: compositor_proxy,
receiver: compositor_receiver,
constellation_chan: constellation_chan,
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
webrender: webrender,
webrender_api_sender: webrender_api_sender,
});
Browser {
compositor: compositor,
}
}
pub fn handle_events(&mut self, events: Vec<WindowEvent>) -> bool {
self.compositor.handle_events(events)
}
pub fn repaint_synchronously(&mut self) {
self.compositor.repaint_synchronously()
}
pub fn pinch_zoom_level(&self) -> f32 {
self.compositor.pinch_zoom_level()
}
pub fn request_title_for_main_frame(&self) {
self.compositor.title_for_main_frame()
}
}
fn create_constellation(opts: opts::Opts,
compositor_proxy: Box<CompositorProxy + Send>,
time_profiler_chan: time::ProfilerChan,
mem_profiler_chan: mem::ProfilerChan,
devtools_chan: Option<Sender<devtools_traits::DevtoolsControlMsg>>,
supports_clipboard: bool,
webrender_api_sender: Option<webrender_traits::RenderApiSender>) -> Sender<ConstellationMsg> {
let bluetooth_thread: IpcSender<BluetoothMethodMsg> = BluetoothThreadFactory::new();
let (public_resource_threads, private_resource_threads) =
new_resource_threads(opts.user_agent.clone(),
devtools_chan.clone(),
time_profiler_chan.clone());
let image_cache_thread = new_image_cache_thread(public_resource_threads.sender(),
webrender_api_sender.as_ref().map(|wr| wr.create_api()));
let font_cache_thread = FontCacheThread::new(public_resource_threads.sender(),
webrender_api_sender.as_ref().map(|wr| wr.create_api()));
let initial_state = InitialConstellationState {
compositor_proxy: compositor_proxy,
devtools_chan: devtools_chan,
bluetooth_thread: bluetooth_thread,
image_cache_thread: image_cache_thread,
font_cache_thread: font_cache_thread,
public_resource_threads: public_resource_threads,
private_resource_threads: private_resource_threads,
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
supports_clipboard: supports_clipboard,
webrender_api_sender: webrender_api_sender,
};
let constellation_chan =
Constellation::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>::start(initial_state);
// Send the URL command to the constellation.
match opts.url {
Some(url) => {
constellation_chan.send(ConstellationMsg::InitLoadUrl(url)).unwrap();
},
None => ()
};
constellation_chan
}
/// Content process entry point.
pub fn run_content_process(token: String) {
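    // Connect back to the privileged process over IPC, receive the pipeline description, then start the script and layout threads.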
let (unprivileged_content_sender, unprivileged_content_receiver) =
ipc::channel::<UnprivilegedPipelineContent>().unwrap();
let connection_bootstrap: IpcSender<IpcSender<UnprivilegedPipelineContent>> =
IpcSender::connect(token).unwrap();
connection_bootstrap.send(unprivileged_content_sender).unwrap();
let unprivileged_content = unprivileged_content_receiver.recv().unwrap();
opts::set_defaults(unprivileged_content.opts());
prefs::extend_prefs(unprivileged_content.prefs());
// Enter the sandbox if necessary.
if opts::get().sandbox {
create_sandbox();
}
script::init();
unprivileged_content.start_all::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>(true);
}
// This is a workaround for https://github.com/rust-lang/rust/pull/30175 until
// https://github.com/lfairy/rust-errno/pull/5 lands, and should be removed once
// we update Servo with the rust-errno crate.
#[cfg(target_os = "android")]
#[no_mangle]
pub unsafe extern fn __errno_location() -> *mut i32 {
extern { fn __errno() -> *mut i32; }<|fim▁hole|>#[cfg(not(target_os = "windows"))]
fn create_sandbox() {
ChildSandbox::new(content_process_sandbox_profile()).activate()
.expect("Failed to activate sandbox!");
}
#[cfg(target_os = "windows")]
fn create_sandbox() {
panic!("Sandboxing is not supported on Windows.");
}<|fim▁end|> | __errno()
}
|
<|file_name|>importborme.py<|end_file_name|><|fim▁begin|>from django.core.management.base import BaseCommand
from django.utils import timezone
import logging
import time
from borme.models import Config
from borme.parser.importer import import_borme_download
# from borme.parser.postgres import psql_update_documents
import borme.parser.importer
from libreborme.utils import get_git_revision_short_hash
class Command(BaseCommand):
# args = '<ISO formatted date (ex. 2015-01-01 or --init)> [--local]'
help = 'Import BORMEs from date'
def add_arguments(self, parser):
parser.add_argument(
'-f', '--from',
nargs=1, required=True,
help='ISO formatted date (ex. 2015-01-01) or "init"')
parser.add_argument(
'-t', '--to',
nargs=1, required=True,
help='ISO formatted date (ex. 2016-01-01) or "today"')
parser.add_argument(
'--local-only',
action='store_true',
default=False,
help='Do not download any file')<|fim▁hole|> help='Abort if local file is not found')
# json only, pdf only...
def handle(self, *args, **options):
self.set_verbosity(int(options['verbosity']))
start_time = time.time()
import_borme_download(options['from'][0],
options['to'][0],
local_only=options['local_only'],
no_missing=options['no_missing'])
config = Config.objects.first()
if config:
config.last_modified = timezone.now()
else:
config = Config(last_modified=timezone.now())
config.version = get_git_revision_short_hash()
config.save()
# Update Full Text Search
# psql_update_documents()
# Elapsed time
elapsed_time = time.time() - start_time
print('\nElapsed time: %.2f seconds' % elapsed_time)
def set_verbosity(self, verbosity):
if verbosity == 0:
borme.parser.importer.logger.setLevel(logging.ERROR)
elif verbosity == 1: # default
borme.parser.importer.logger.setLevel(logging.INFO)
elif verbosity == 2:
borme.parser.importer.logger.setLevel(logging.INFO)
elif verbosity > 2:
borme.parser.importer.logger.setLevel(logging.DEBUG)
logging.getLogger().setLevel(logging.DEBUG)<|fim▁end|> | parser.add_argument(
'--no-missing',
action='store_true',
default=False, |
<|file_name|>checkMeshDict.H<|end_file_name|><|fim▁begin|>/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | cfMesh: A library for mesh generation
\\ / O peration |
\\ / A nd | Author: Franjo Juretic ([email protected])
\\/ M anipulation | Copyright (C) Creative Fields, Ltd.
-------------------------------------------------------------------------------
License
This file is part of cfMesh.
cfMesh is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
cfMesh is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with cfMesh. If not, see <http://www.gnu.org/licenses/>.
Class
checkMeshDict
Description
Check whether the meshDict file is set correctly
SourceFiles
checkMeshDict.C
\*---------------------------------------------------------------------------*/
#ifndef checkMeshDict_H
#define checkMeshDict_H
#include "IOdictionary.H"
#include <map>
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
/*---------------------------------------------------------------------------*\
Class checkMeshDict Declaration
\*---------------------------------------------------------------------------*/
class checkMeshDict
{
//- Reference to the mesh
IOdictionary& meshDict_;
// Private member functions
//- check patchCellSize entry
void checkPatchCellSize() const;
//- check subsetCellSize entry
void checkSubsetCellSize() const;
//- check local refinement level
void checkLocalRefinementLevel() const;
//- check keepCellsIntersectingPatches entry
void checkKeepCellsIntersectingPatches() const;
//- check removeCellsIntersectingPatches entry
void checkRemoveCellsIntersectingPatches() const;
//- check objectRefinements entry
void checkObjectRefinements() const;
//- check entry for boundary layers
void checkBoundaryLayers() const;
//- check renameBoundary entry
void checkRenameBoundary() const;
//- perform all checks
void checkEntries() const;
//- update patchCellSize entry
void updatePatchCellSize(const std::map<word, wordList>&);
//- update subsetCellSize entry
void updateSubsetCellSize(const std::map<word, wordList>&);
//- update local refinement
void updateLocalRefinementLevel(const std::map<word, wordList>&);
//- check keepCellsIntersectingPatches entry
void updateKeepCellsIntersectingPatches
(
const std::map<word, wordList>&<|fim▁hole|> void updateRemoveCellsIntersectingPatches
(
const std::map<word, wordList>&
);
//- check objectRefinements entry
void updateObjectRefinements(const std::map<word, wordList>&);
//- check entry for boundary layers
void updateBoundaryLayers(const std::map<word, wordList>&);
//- check renameBoundary entry
void updateRenameBoundary
(
const std::map<word, wordList>&,
const std::map<word, word>&
);
public:
// Constructors
//- Construct from IOdictionary
checkMeshDict(IOdictionary& meshDict);
// Destructor
~checkMeshDict();
// Public member functions
//- update meshDict based on modification of patches in the surface
void updateDictionaries
(
const std::map<word, wordList>& patchesForPatch,
const std::map<word, word>& patchTypes,
const bool renamePatches = true
);
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //<|fim▁end|> | );
//- check removeCellsIntersectingPatches entry |
<|file_name|>CacheSerializationTest.java<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2008-2020, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.cache;
import com.hazelcast.cache.impl.CachePartitionEventData;
import com.hazelcast.cache.impl.CachePartitionSegment;
import com.hazelcast.cache.impl.CacheService;
import com.hazelcast.cache.impl.operation.CacheReplicationOperation;
import com.hazelcast.cache.impl.record.CacheRecord;
import com.hazelcast.cache.impl.record.CacheRecordFactory;
import com.hazelcast.cluster.Address;
import com.hazelcast.cluster.Member;
import com.hazelcast.cluster.impl.MemberImpl;
import com.hazelcast.config.InMemoryFormat;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.instance.impl.HazelcastInstanceImpl;
import com.hazelcast.instance.impl.HazelcastInstanceProxy;
import com.hazelcast.internal.serialization.Data;
import com.hazelcast.internal.serialization.SerializationService;
import com.hazelcast.internal.serialization.SerializationServiceBuilder;
import com.hazelcast.internal.serialization.impl.DefaultSerializationServiceBuilder;
import com.hazelcast.internal.services.ServiceNamespace;
import com.hazelcast.internal.util.Clock;
import com.hazelcast.internal.util.CollectionUtil;
import com.hazelcast.spi.impl.NodeEngineImpl;
import com.hazelcast.test.HazelcastParallelClassRunner;
import com.hazelcast.test.HazelcastTestSupport;
import com.hazelcast.test.TestHazelcastInstanceFactory;
import com.hazelcast.test.annotation.ParallelJVMTest;
import com.hazelcast.test.annotation.QuickTest;
import com.hazelcast.version.MemberVersion;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import javax.cache.Cache;
import javax.cache.CacheManager;
import javax.cache.configuration.CompleteConfiguration;
import javax.cache.configuration.MutableConfiguration;
import javax.cache.spi.CachingProvider;
import java.lang.reflect.Field;
import java.net.UnknownHostException;
import java.util.Collection;
import static com.hazelcast.cache.CacheTestSupport.createServerCachingProvider;
import static org.junit.Assert.assertEquals;
/**
* Serialization test class for JCache
*/
@RunWith(HazelcastParallelClassRunner.class)
@Category({QuickTest.class, ParallelJVMTest.class})
public class CacheSerializationTest extends HazelcastTestSupport {
SerializationService service;
@Before
public void setup() {
SerializationServiceBuilder builder = new DefaultSerializationServiceBuilder();
service = builder.build();
}
@After
public void tearDown() {
}
@Test
public void testCacheRecord_withBinaryInMemoryData() {
String value = randomString();
CacheRecord cacheRecord = createRecord(InMemoryFormat.BINARY, value);
Data cacheRecordData = service.toData(cacheRecord);
CacheRecord deserialized = service.toObject(cacheRecordData);
assertEquals(value, service.toObject(deserialized.getValue()));
}
@Test
public void testCacheRecord_withObjectInMemoryData() {
String value = randomString();
CacheRecord cacheRecord = createRecord(InMemoryFormat.OBJECT, value);
Data cacheRecordData = service.toData(cacheRecord);
CacheRecord deserialized = service.toObject(cacheRecordData);
assertEquals(value, deserialized.getValue());
}
@Test
public void test_CacheReplicationOperation_serialization() throws Exception {
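        // Fill three caches, then check that a CacheReplicationOperation built for each partition survives a serialization round trip.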
TestHazelcastInstanceFactory factory = new TestHazelcastInstanceFactory(1);
HazelcastInstance hazelcastInstance = factory.newHazelcastInstance();
try {
CachingProvider provider = createServerCachingProvider(hazelcastInstance);
CacheManager manager = provider.getCacheManager();
CompleteConfiguration configuration = new MutableConfiguration();
Cache cache1 = manager.createCache("cache1", configuration);
Cache cache2 = manager.createCache("cache2", configuration);
Cache cache3 = manager.createCache("cache3", configuration);
for (int i = 0; i < 1000; i++) {
cache1.put("key" + i, i);
cache2.put("key" + i, i);
cache3.put("key" + i, i);
}
HazelcastInstanceProxy proxy = (HazelcastInstanceProxy) hazelcastInstance;
Field original = HazelcastInstanceProxy.class.getDeclaredField("original");
original.setAccessible(true);
HazelcastInstanceImpl impl = (HazelcastInstanceImpl) original.get(proxy);
NodeEngineImpl nodeEngine = impl.node.nodeEngine;
CacheService cacheService = nodeEngine.getService(CacheService.SERVICE_NAME);
int partitionCount = nodeEngine.getPartitionService().getPartitionCount();
for (int partitionId = 0; partitionId < partitionCount; partitionId++) {
CachePartitionSegment segment = cacheService.getSegment(partitionId);
int replicaIndex = 1;
Collection<ServiceNamespace> namespaces = segment.getAllNamespaces(replicaIndex);
if (CollectionUtil.isEmpty(namespaces)) {
continue;
}
<|fim▁hole|> service.toObject(serialized);
} catch (Exception e) {
throw new Exception("Partition: " + partitionId, e);
}
}
} finally {
factory.shutdownAll();
}
}
@Test
public void testCachePartitionEventData() throws UnknownHostException {
Address address = new Address("127.0.0.1", 5701);
Member member = new MemberImpl(address, MemberVersion.UNKNOWN, true);
CachePartitionEventData cachePartitionEventData = new CachePartitionEventData("test", 1, member);
CachePartitionEventData deserialized = service.toObject(cachePartitionEventData);
assertEquals(cachePartitionEventData, deserialized);
}
private CacheRecord createRecord(InMemoryFormat format, String value) {
CacheRecordFactory factory = new CacheRecordFactory(format, service);
return factory.newRecordWithExpiry(value, Clock.currentTimeMillis(), -1);
}
}<|fim▁end|> | CacheReplicationOperation operation = new CacheReplicationOperation();
operation.prepare(segment, namespaces, replicaIndex);
Data serialized = service.toData(operation);
try { |
<|file_name|>Error.cc<|end_file_name|><|fim▁begin|>#include <apertium_xml2cpp.h>
#include <string><|fim▁hole|>#include <iostream>
namespace apertium {
namespace xml2cpp {
} // namespace xml2cpp
} // namespace apertium<|fim▁end|> | |
<|file_name|>settings.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Scrapy settings for DynamicItemsScrapy project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'DynamicItemsScrapy'
SPIDER_MODULES = ['DynamicItemsScrapy.spiders']
NEWSPIDER_MODULE = 'DynamicItemsScrapy.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'DynamicItemsScrapy (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay<|fim▁hole|>#DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'DynamicItemsScrapy.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'DynamicItemsScrapy.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'DynamicItemsScrapy.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'<|fim▁end|> | # See also autothrottle settings and docs |
<|file_name|>request.go<|end_file_name|><|fim▁begin|>//
// Last.Backend LLC CONFIDENTIAL
// __________________
//
// [2014] - [2019] Last.Backend LLC
// All Rights Reserved.
//
// NOTICE: All information contained herein is, and remains
// the property of Last.Backend LLC and its suppliers,
// if any. The intellectual and technical concepts contained
// herein are proprietary to Last.Backend LLC
// and its suppliers and may be covered by Russian Federation and Foreign Patents,
// patents in process, and are protected by trade secret or copyright law.
// Dissemination of this information or reproduction of this material
// is strictly forbidden unless prior written permission is obtained
// from Last.Backend LLC.
//
package request
import (
"bytes"
"context"
"encoding/json"
"fmt"
"golang.org/x/net/http2"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
"strings"
)
type Request struct {
// required
client HTTPClient
verb string
baseURL *url.URL
pathPrefix string
params url.Values
headers http.Header
err error
body io.Reader
ctx context.Context
}
type HTTPClient interface {
Do(req *http.Request) (*http.Response, error)
}
func New(client HTTPClient, method string, baseURL *url.URL) *Request {
pathPrefix := "/"
if baseURL != nil {
pathPrefix = path.Join(pathPrefix, baseURL.Path)
}
r := &Request{
client: client,
verb: method,
baseURL: baseURL,
pathPrefix: pathPrefix,
}
return r
}
func (r *Request) Body(data []byte) *Request {
r.body = bytes.NewReader(data)
return r
}
func (r *Request) AddHeader(key, val string) *Request {
if r.headers == nil {
r.headers = make(map[string][]string)
}
r.headers.Add(key, val)
return r
}
func (r *Request) Do() Result {
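	// Execute the request and wrap the raw response body, content type and status code in a Result.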
var result Result
if r.err != nil {
return Result{err: r.err}
}
client := r.client
if client == nil {
panic("client not initialized")
}
u := r.URL().String()
req, err := http.NewRequest(r.verb, u, r.body)
if err != nil {
return Result{err: err}
}
if r.ctx != nil {
req = req.WithContext(r.ctx)
}
req.Header = r.headers
resp, err := client.Do(req)
if err != nil {
return Result{err: err}
}
result = r.transformResponse(resp, req)
return result
}
func (r *Request) JSON(success interface{}, failure interface{}) error {
client := r.client
if client == nil {
panic("client not initialized")
}
u := r.URL().String()
req, err := http.NewRequest(r.verb, u, r.body)
if err != nil {
return err
}
if r.ctx != nil {
req = req.WithContext(r.ctx)
}
req.Header = r.headers
resp, err := client.Do(req)
if err != nil {
if io.EOF == err || strings.Contains(err.Error(), "EOF") {
return nil
}
return err
}
<|fim▁hole|> return decodeJSON(resp, success, failure)
}
type Result struct {
body []byte
contentType string
err error
statusCode int
}
// Raw returns the raw result.
func (r Result) Raw() ([]byte, error) {
return r.body, r.err
}
// Raw returns the raw result.
func decodeJSON(r *http.Response, success interface{}, failure interface{}) error {
if code := r.StatusCode; 200 > code || code > 299 {
if failure == nil {
return nil
}
return decodeResponseJSON(r, failure)
}
if success == nil {
return nil
}
return decodeResponseJSON(r, success)
}
func (r Result) StatusCode() int {
return r.statusCode
}
func (r Result) Error() error {
return r.err
}
func (r *Request) Stream() (io.ReadCloser, *http.Response, error) {
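	// Execute the request and return the raw response body for streaming; non-2xx/3xx responses are converted into errors.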
if r.err != nil {
return nil, nil, r.err
}
u := r.URL().String()
req, err := http.NewRequest(r.verb, u, nil)
if err != nil {
return nil, nil, err
}
if r.ctx != nil {
req = req.WithContext(r.ctx)
}
req.Header = r.headers
client := r.client
if client == nil {
client = http.DefaultClient
}
res, err := client.Do(req)
if err != nil {
return nil, nil, err
}
switch {
case (res.StatusCode >= 200) && (res.StatusCode < 400):
return res.Body, res, nil
default:
defer res.Body.Close()
result := r.transformResponse(res, req)
err := result.Error()
if err == nil {
err = fmt.Errorf("%d while accessing %v: %s", result.statusCode, u, string(result.body))
}
return nil, res, err
}
}
func (r *Request) Param(name, value string) *Request {
if r.params == nil {
r.params = make(url.Values)
}
r.params[name] = append(r.params[name], value)
return r
}
func (r *Request) transformResponse(resp *http.Response, req *http.Request) Result {
var body []byte
if resp.Body != nil {
data, err := ioutil.ReadAll(resp.Body)
switch err.(type) {
case nil:
body = data
case http2.StreamError:
return Result{
err: fmt.Errorf("stream error %#v when reading", err),
}
default:
return Result{
err: fmt.Errorf("unexpected error %#v", err),
}
}
}
return Result{
body: body,
contentType: resp.Header.Get("Content-Type"),
statusCode: resp.StatusCode,
}
}
func decodeResponseJSON(r *http.Response, v interface{}) error {
err := json.NewDecoder(r.Body).Decode(v)
if err != nil && io.EOF == err {
return nil
}
return err
}
func (r *Request) URL() *url.URL {
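	// Assemble the final URL from the base URL, the path prefix and the accumulated query parameters.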
p := r.pathPrefix
finalURL := &url.URL{}
if r.baseURL != nil {
*finalURL = *r.baseURL
}
finalURL.Path = p
query := url.Values{}
for key, values := range r.params {
for _, value := range values {
query.Add(key, value)
}
}
finalURL.RawQuery = query.Encode()
return finalURL
}<|fim▁end|> | |
<|file_name|>xss.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
__license__ = """<|fim▁hole|>GoLismero 2.0 - The web knife - Copyright (C) 2011-2014
Authors:
Jekkay Hu | jekkay<@>gmail.com
Daniel Garcia Garcia a.k.a cr0hn | cr0hn<@>cr0hn.com
Mario Vilas | mvilas<@>gmail.com
Golismero project site: http://golismero-project.com
Golismero project mail: [email protected]
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from . import HTTPInjection
#------------------------------------------------------------------------------
class XSS(HTTPInjection):
"""
Cross-Site Scripting.
Cross-site scripting vulnerabilities, also known as XSS, allow an attacker
to inject arbitrary HTML content into a web page. Typically an attacker
would inject JavaScript code in order to control the web application on
behalf of the user, or redirect the user to a malicious site.
There are several libraries and methods of filtering user input to prevent
XSS vulnerabilities. Use whichever is provided for your current programming
language and platform or, if none is available or feasible, try using
third party products. As a last resort, try developing your own XSS filter
using the guidelines provided by OWASP.
"""
DEFAULTS = HTTPInjection.DEFAULTS.copy()
DEFAULTS["cwe"] = ("CWE-79", "CWE-80")
DEFAULTS["cvss_base"] = "6.8"
DEFAULTS["references"] = (
"https://www.owasp.org/index.php/Cross_Site_Scripting_Flaw",
"https://www.owasp.org/index.php/Cross-site_Scripting_(XSS)",
"https://www.owasp.org/index.php/"
"XSS_(Cross_Site_Scripting)_Prevention_Cheat_Sheet",
"https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet",
)<|fim▁end|> | |
<|file_name|>shuffle_an_array.rs<|end_file_name|><|fim▁begin|>extern crate rand;
use rand::Rng;
fn swap(arr: &mut Vec<i64>, i: usize) {
// Copy the element out first so the borrow of `arr` ends before the writes below.
let a = &arr[i].clone();
arr[i] = arr[i + 1];
arr[i + 1] = *a;<|fim▁hole|>}
fn randomize(arr: &Vec<i64>) -> Vec<i64> {
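    // Walk the array once, swapping each adjacent pair with 50% probability (a simple, non-uniform shuffle).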
let mut rng = rand::thread_rng();
let mut temp = arr.clone();
for i in 0..arr.len() - 1 {
let random_num = rng.gen::<u8>() % 2;
if random_num == 1 {
swap(&mut temp, i);
}
}
return temp;
}
fn main() {
println!("{:?}", randomize(&vec![1, 2, 3, 4, 5]));
}<|fim▁end|> | |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>'''
Setup.py script.
'''
__author__ = 'riko'
from Cython.Build import cythonize
import numpy
from setuptools import setup, Extension, find_packages
try:
use_cython = True
from Cython.Distutils import build_ext
except ImportError:
use_cython = False
ext = '.pyx' if use_cython else '.c'
ext_modules = [Extension("calculations", ["models/calculations/calculations"+ext])]
include_dirs = []
cmdclass = {}
if use_cython:<|fim▁hole|> ext_modules = cythonize(ext_modules, include_dirs=[numpy.get_include()])
include_dirs=[numpy.get_include()]
cmdclass.update({ 'build_ext': build_ext })
print ext_modules
setup(name='TennisModelling',
version='1.0',
description='Tennis modelling tool.',
author='Erik Grabljevec',
author_email='[email protected]',
url='https://github.com/erix5son/TennisModelling',
packages=['data_tools', 'models', 'ranking_systems'],
py_modules = ['settings'],
cmdclass=cmdclass,
ext_modules=ext_modules,
include_dirs=[include_dirs, numpy.get_include()]
)<|fim▁end|> | print "Doing extensions: ", ext_modules |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""Library for performing speech recognition with the Google Speech Recognition API."""
__author__ = 'Anthony Zhang (Uberi)'
__version__ = '1.0.4'
__license__ = 'BSD'
import io, subprocess, wave, shutil
import math, audioop, collections
import json, urllib.request
#wip: filter out clicks and other too short parts
class AudioSource(object):
def __init__(self):
raise NotImplementedError("this is an abstract class")
def __enter__(self):
raise NotImplementedError("this is an abstract class")
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError("this is an abstract class")
try:
import pyaudio
class Microphone(AudioSource):
def __init__(self, device_index = None):
self.device_index = device_index
self.format = pyaudio.paInt16 # 16-bit int sampling
self.SAMPLE_WIDTH = pyaudio.get_sample_size(self.format)
self.RATE = 16000 # sampling rate in Hertz
self.CHANNELS = 1 # mono audio
self.CHUNK = 1024 # number of frames stored in each buffer
self.audio = None
self.stream = None
def __enter__(self):
self.audio = pyaudio.PyAudio()
self.stream = self.audio.open(
input_device_index = self.device_index,
format = self.format, rate = self.RATE, channels = self.CHANNELS, frames_per_buffer = self.CHUNK,
input = True, # stream is an input stream
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stream.stop_stream()
self.stream.close()
self.stream = None
self.audio.terminate()
except ImportError:
pass
class WavFile(AudioSource):
def __init__(self, filename_or_fileobject):
if isinstance(filename_or_fileobject, str):
self.filename = filename_or_fileobject
else:
self.filename = None
self.wav_file = filename_or_fileobject
self.stream = None
def __enter__(self):
if self.filename: self.wav_file = open(self.filename, "rb")
self.wav_reader = wave.open(self.wav_file, "rb")
self.SAMPLE_WIDTH = self.wav_reader.getsampwidth()
self.RATE = self.wav_reader.getframerate()
self.CHANNELS = self.wav_reader.getnchannels()
assert self.CHANNELS == 1 # audio must be mono
self.CHUNK = 4096
self.stream = WavFile.WavStream(self.wav_reader)
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.filename: self.wav_file.close()
self.stream = None
class WavStream(object):
def __init__(self, wav_reader):
self.wav_reader = wav_reader
def read(self, size = -1):
if size == -1:
return self.wav_reader.readframes(self.wav_reader.getnframes())
return self.wav_reader.readframes(size)
class AudioData(object):
def __init__(self, rate, data):
self.rate = rate
self.data = data
class Recognizer(AudioSource):
def __init__(self, language = "fr-FR", key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw"):
self.key = key
self.language = language
self.energy_threshold = 1500 # minimum audio energy to consider for recording
self.pause_threshold = 0.8 # seconds of quiet time before a phrase is considered complete
self.quiet_duration = 0.5 # amount of quiet time to keep on both sides of the recording
def samples_to_flac(self, source, frame_data):
import platform, os
with io.BytesIO() as wav_file:
with wave.open(wav_file, "wb") as wav_writer:
wav_writer.setsampwidth(source.SAMPLE_WIDTH)
wav_writer.setnchannels(source.CHANNELS)
wav_writer.setframerate(source.RATE)
wav_writer.writeframes(frame_data)
wav_data = wav_file.getvalue()
# determine which converter executable to use
system = platform.system()
path = os.path.dirname(os.path.abspath(__file__)) # directory of the current module file, where all the FLAC bundled binaries are stored
if shutil.which("flac") is not None: # check for installed version first
flac_converter = shutil.which("flac")
elif system == "Windows" and platform.machine() in {"i386", "x86", "x86_64", "AMD64"}: # Windows NT, use the bundled FLAC conversion utility
flac_converter = os.path.join(path, "flac-win32.exe")
elif system == "Linux" and platform.machine() in {"i386", "x86", "x86_64", "AMD64"}:
flac_converter = os.path.join(path, "flac-linux-i386")
else:
raise ChildProcessError("FLAC conversion utility not available - consider installing the FLAC utility")
process = subprocess.Popen("\"%s\" --stdout --totally-silent --best -" % flac_converter, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
flac_data, stderr = process.communicate(wav_data)
return flac_data
def record(self, source, duration = None):
assert isinstance(source, AudioSource) and source.stream
frames = io.BytesIO()
seconds_per_buffer = source.CHUNK / source.RATE
elapsed_time = 0
while True: # loop for the total number of chunks needed
elapsed_time += seconds_per_buffer
if duration and elapsed_time > duration: break
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break
frames.write(buffer)
frame_data = frames.getvalue()
frames.close()
return AudioData(source.RATE, self.samples_to_flac(source, frame_data))
def listen(self, source, timeout = None):
assert isinstance(source, AudioSource) and source.stream
# record audio data as raw samples
frames = collections.deque()
assert self.pause_threshold >= self.quiet_duration >= 0
seconds_per_buffer = source.CHUNK / source.RATE
pause_buffer_count = math.ceil(self.pause_threshold / seconds_per_buffer) # number of buffers of quiet audio before the phrase is complete
quiet_buffer_count = math.ceil(self.quiet_duration / seconds_per_buffer) # maximum number of buffers of quiet audio to retain before and after
elapsed_time = 0
# store audio input until the phrase starts
while True:<|fim▁hole|> if timeout and elapsed_time > timeout:
raise TimeoutError("listening timed out")
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
# check if the audio input has stopped being quiet
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold:
break
if len(frames) > quiet_buffer_count: # ensure we only keep the needed amount of quiet buffers
frames.popleft()
# read audio input until the phrase ends
pause_count = 0
while True:
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
# check if the audio input has gone quiet for longer than the pause threshold
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold:
pause_count = 0
else:
pause_count += 1
if pause_count > pause_buffer_count: # end of the phrase
break
# obtain frame data
for i in range(quiet_buffer_count, pause_buffer_count): frames.pop() # remove extra quiet frames at the end
frame_data = b"".join(list(frames))
return AudioData(source.RATE, self.samples_to_flac(source, frame_data))
def recognize(self, audio_data, show_all = False):
assert isinstance(audio_data, AudioData)
url = "http://www.google.com/speech-api/v2/recognize?client=chromium&lang=%s&key=%s" % (self.language, self.key)
self.request = urllib.request.Request(url, data = audio_data.data, headers = {"Content-Type": "audio/x-flac; rate=%s" % audio_data.rate})
# check for invalid key response from the server
try:
response = urllib.request.urlopen(self.request)
except:
raise KeyError("Server wouldn't respond (invalid key or quota has been maxed out)")
response_text = response.read().decode("utf-8")
# ignore any blank blocks
actual_result = []
for line in response_text.split("\n"):
if not line: continue
result = json.loads(line)["result"]
if len(result) != 0:
actual_result = result[0]
# make sure we have a list of transcriptions
if "alternative" not in actual_result:
raise LookupError("Speech is unintelligible")
# return the best guess unless told to do otherwise
if not show_all:
for prediction in actual_result["alternative"]:
if "confidence" in prediction:
return prediction["transcript"]
raise LookupError("Speech is unintelligible")
spoken_text = []
# check to see if Google thinks it's 100% correct
default_confidence = 0
if len(actual_result["alternative"])==1: default_confidence = 1
# return all the possibilities
for prediction in actual_result["alternative"]:
if "confidence" in prediction:
spoken_text.append({"text":prediction["transcript"],"confidence":prediction["confidence"]})
else:
spoken_text.append({"text":prediction["transcript"],"confidence":default_confidence})
return spoken_text
if __name__ == "__main__":
r = Recognizer()
m = Microphone()
while True:
print("Say something!")
with m as source:
audio = r.listen(source)
print("Got it! Now to recognize it...")
try:
print("You said " + r.recognize(audio))
except LookupError:
print("Oops! Didn't catch that")<|fim▁end|> | # handle timeout if specified
elapsed_time += seconds_per_buffer |
<|file_name|>settings.py<|end_file_name|><|fim▁begin|># -*- coding: UTF-8 -*-
from lino.projects.std.settings import *
<|fim▁hole|>
class Site(Site):
title = "Lino@prj1"
# server_url = "https://prj1.mydomain.com"
SITE = Site(globals())
# locally override attributes of individual plugins
# SITE.plugins.finan.suggest_future_vouchers = True
# MySQL
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'mysite', #database name
'USER': 'django',
'PASSWORD': 'my cool password',
'HOST': 'localhost',
'PORT': 3306,
'OPTIONS': {
"init_command": "SET storage_engine=MyISAM",
}
}
}<|fim▁end|> | import logging
logging.getLogger('weasyprint').setLevel("ERROR") # see #1462 |
<|file_name|>issue-28472.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Check that the visibility modifier is included in the span of foreign items.
extern {
fn foo();<|fim▁hole|>
pub //~ ERROR the name `foo` is defined multiple times
fn foo();
pub //~ ERROR the name `foo` is defined multiple times
static mut foo: u32;
}
fn main() {
}<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>import random
import string
from django.contrib.auth.models import User
from django.test import TestCase
from longerusernameandemail.forms import UserCreationForm
def get_random_string(alpha=string.ascii_letters, length=100):
"Get a 'long', randmon string"
return ''.join([random.choice(alpha) for i in range(length)])
class LongerUsernameAndEmailTests(TestCase):
"""
Unit tests for longerusernameandemail app
"""
def setUp(self):
"""
creates a user with a terribly long username
"""
long_username = ''.join([str(i) for i in range(100)])
self.user = User.objects.create_user(
'test%s' % long_username,
'%[email protected]' % long_username,
'testpassword'
)
def testUserCreation(self):
"""
tests that self.user was successfully saved, and can be retrieved
"""
self.assertNotEqual(self.user, None)
        # raises DoesNotExist if the user wasn't created
User.objects.get(id=self.user.id)
class LongerUsernameAndEmailFormTests(TestCase):
"""
Unit tests for longerusernameandemail forms.
"""
def setUp(self):
# create a user with long username & email
self.user = User.objects.create_user(
'test%s' % get_random_string(),
'%[email protected]' % get_random_string(),
'testpassword',
)
def test_valid_new_user(self):
"test a new user with a long username and long email is valid"
data = {
'username': get_random_string(),
'email': '%[email protected]' % get_random_string(),
'password1': 'test',
'password2': 'test',
}
form = UserCreationForm(data)<|fim▁hole|> def test_invalid_new_user_email_collision(self):
"""
test we can't create a new user with the same email as an
existing user
"""
data = {
'username': 'anything',
'email': self.user.email,
'password1': 'test',
'password2': 'test',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertIn('email', form.errors)
self.assertIn('already exists', form.errors['email'][0])
class LongerUsernameAndEmailAdminTests(TestCase):
"""
Functional tests for the django admin when longerusernameandemail
is enabled.
"""
urls = 'longerusernameandemail.tests.urls'
email = '[email protected]'
password = 'superuserpassowrd'
def setUp(self):
# create two users with long usernames & emails
self.user1 = User.objects.create_user(
'test%s' % get_random_string(),
'%[email protected]' % get_random_string(),
'testpassword',
)
self.user2 = User.objects.create_user(
'test%s' % get_random_string(),
'%[email protected]' % get_random_string(),
'testpassword',
)
# create superuser to do the actions, and log in as them
User.objects.create_superuser(self.email, self.email, self.password)
self.client.login(username=self.email, password=self.password)
def test_read_user_list(self):
"test we can read the list of users in the admin"
resp = self.client.get('/admin/auth/user/')
self.assertEqual(resp.status_code, 200)
def test_read_user(self):
"test we can read a particular user in the admin"
url = '/admin/auth/user/{}/'.format(self.user1.pk)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
def test_create_user(self):
"test we can create a new user using the admin"
org_user_count = User.objects.count()
resp = self.client.post('/admin/auth/user/add/', data={
'username': 'test{}@example.com'.format(get_random_string()),
'email': 'test{}@example.com'.format(get_random_string()),
'password1': 'test',
'password2': 'test',
})
self.assertEqual(resp.status_code, 302)
self.assertEqual(User.objects.count(), org_user_count + 1)
def test_edit_user(self):
"test we can edit a particular user using the admin"
new_email = 'test{}@example.com'.format(get_random_string())
url = '/admin/auth/user/{}/'.format(self.user1.pk)
resp = self.client.post(url, {
'username': new_email,
'email': new_email,
'last_login_0': self.user1.last_login.strftime('%F'),
'last_login_1': self.user1.last_login.strftime('%T'),
'date_joined_0': self.user1.date_joined.strftime('%F'),
'date_joined_1': self.user1.date_joined.strftime('%T'),
})
self.assertEqual(resp.status_code, 302)
self.assertEqual(User.objects.filter(email=new_email).count(), 1)
def test_delete_user(self):
"test we can delete a new user using the admin"
user = User.objects.create_user('[email protected]', '[email protected]', 'pwd')
org_user_count = User.objects.count()
url = '/admin/auth/user/{}/delete/'.format(user.pk)
resp = self.client.post(url, {'post': 'yes'})
self.assertEqual(resp.status_code, 302)
self.assertEqual(User.objects.count(), org_user_count - 1)<|fim▁end|> | self.assertTrue(form.is_valid())
|
<|file_name|>e10.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python -tt -Wall
def prime_sieve(upper):<|fim▁hole|> def next_prime():
for i,v in enumerate(marked):
if not v:
yield i+2
next_prime_gen = next_prime()
for p in next_prime_gen:
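        # mark every multiple of p as composite, starting at 2*p (index n stands for the integer n + 2)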
for n in xrange(2*p - 2, len(marked), p):
marked[n] = True
yield p
def main():
print(sum(prime_sieve(2000000)))
if __name__ == '__main__':
main()<|fim▁end|> | marked = [False] * (upper-2)
|
<|file_name|>boolean.rs<|end_file_name|><|fim▁begin|>//! [Lambda-encoded booleans](https://en.wikipedia.org/wiki/Church_encoding#Church_Booleans)
use crate::term::Term::*;
use crate::term::{abs, app, Term};
/// A lambda-encoded boolean `true`.
///
/// TRUE ≡ λab.a ≡ λ λ 2
pub fn tru() -> Term {
abs!(2, Var(2))
}
/// A lambda-encoded boolean `false`.
///
/// FALSE ≡ λab.b ≡ λ λ 1
pub fn fls() -> Term {
abs!(2, Var(1))
}
/// Applied to two lambda-encoded booleans it returns their lambda-encoded conjunction.
///
/// AND ≡ λpq.p q p ≡ λ λ 2 1 2
///
/// # Examples
/// ```
/// use lambda_calculus::data::boolean::{and, tru, fls};
/// use lambda_calculus::*;
///
/// assert_eq!(beta(app!(and(), tru(), tru()), NOR, 0), tru());
/// assert_eq!(beta(app!(and(), tru(), fls()), NOR, 0), fls());
/// assert_eq!(beta(app!(and(), fls(), tru()), NOR, 0), fls());
/// assert_eq!(beta(app!(and(), fls(), fls()), NOR, 0), fls());
/// ```
pub fn and() -> Term {
abs!(2, app!(Var(2), Var(1), Var(2)))
}
/// Applied to two lambda-encoded booleans it returns their lambda-encoded disjunction.
///
/// OR ≡ λpq.p p q ≡ λ λ 2 2 1
///
/// # Examples
/// ```
/// use lambda_calculus::data::boolean::{or, tru, fls};
/// use lambda_calculus::*;
///
/// assert_eq!(beta(app!(or(), tru(), tru()), NOR, 0), tru());
/// assert_eq!(beta(app!(or(), tru(), fls()), NOR, 0), tru());
/// assert_eq!(beta(app!(or(), fls(), tru()), NOR, 0), tru());
/// assert_eq!(beta(app!(or(), fls(), fls()), NOR, 0), fls());
/// ```
pub fn or() -> Term {
abs!(2, app!(Var(2), Var(2), Var(1)))
}
/// Applied to a lambda-encoded boolean it returns its lambda-encoded negation.
///
/// NOT ≡ λp.p FALSE TRUE ≡ λ 1 FALSE TRUE
///
/// # Examples
/// ```
/// use lambda_calculus::data::boolean::{not, tru, fls};
/// use lambda_calculus::*;
///
/// assert_eq!(beta(app!(not(), tru()), NOR, 0), fls());
/// assert_eq!(beta(app!(not(), fls()), NOR, 0), tru());
/// ```
pub fn not() -> Term {
abs(app!(Var(1), fls(), tru()))
}
/// Applied to two lambda-encoded booleans it returns their lambda-encoded exclusive disjunction.
///
/// XOR ≡ λpq.p (NOT q) q ≡ λ λ 2 (NOT 1) 1
///
/// # Examples
/// ```
/// use lambda_calculus::data::boolean::{xor, tru, fls};
/// use lambda_calculus::*;
///
/// assert_eq!(beta(app!(xor(), tru(), tru()), NOR, 0), fls());
/// assert_eq!(beta(app!(xor(), tru(), fls()), NOR, 0), tru());
/// assert_eq!(beta(app!(xor(), fls(), tru()), NOR, 0), tru());
/// assert_eq!(beta(app!(xor(), fls(), fls()), NOR, 0), fls());
/// ```
pub fn xor() -> Term {
abs!(2, app!(Var(2), app!(not(), Var(1)), Var(1)))
}
/// Applied to two lambda-encoded booleans it returns their lambda-encoded joint denial.
///
/// NOR ≡ λpq.p p q FALSE TRUE ≡ λ λ 2 2 1 FALSE TRUE
///
/// # Examples
/// ```
/// use lambda_calculus::data::boolean::{nor, tru, fls};
/// use lambda_calculus::*;
///
/// assert_eq!(beta(app!(nor(), tru(), tru()), NOR, 0), fls());
/// assert_eq!(beta(app!(nor(), tru(), fls()), NOR, 0), fls());
/// assert_eq!(beta(app!(nor(), fls(), tru()), NOR, 0), fls());
/// assert_eq!(beta(app!(nor(), fls(), fls()), NOR, 0), tru());
/// ```
pub fn nor() -> Term {
abs!(2, app!(Var(2), Var(2), Var(1), fls(), tru()))
}
/// Applied to two lambda-encoded booleans it returns their lambda-encoded exclusive joint denial
/// (`nor`); it is also known as `iff`.
///
/// XNOR ≡ λpq.p q (NOT q) ≡ λ λ 2 1 (NOT 1)
///
/// # Examples
/// ```
/// use lambda_calculus::data::boolean::{xnor, tru, fls};
/// use lambda_calculus::*;
///
/// assert_eq!(beta(app!(xnor(), tru(), tru()), NOR, 0), tru());
/// assert_eq!(beta(app!(xnor(), tru(), fls()), NOR, 0), fls());
/// assert_eq!(beta(app!(xnor(), fls(), tru()), NOR, 0), fls());
/// assert_eq!(beta(app!(xnor(), fls(), fls()), NOR, 0), tru());
/// ```
pub fn xnor() -> Term {
abs!(2, app!(Var(2), Var(1), app(not(), Var(1))))
}
/// Applied to two lambda-encoded booleans it returns their lambda-encoded alternative denial.
///
/// NAND ≡ λpq.p q p FALSE TRUE ≡ λ λ 2 1 2 FALSE TRUE
///
/// # Examples
/// ```
/// use lambda_calculus::data::boolean::{nand, tru, fls};
/// use lambda_calculus::*;
///
/// assert_eq!(beta(app!(nand(), tru(), tru()), NOR, 0), fls());
/// assert_eq!(beta(app!(nand(), tru(), fls()), NOR, 0), tru());
/// assert_eq!(beta(app!(nand(), fls(), tru()), NOR, 0), tru());
/// assert_eq!(beta(app!(nand(), fls(), fls()), NOR, 0), tru());
/// ```
pub fn nand() -> Term {
abs!(2, app!(Var(2), Var(1), Var(2), fls(), tru()))<|fim▁hole|>/// is true or the second one if the predicate is false.
///
/// IF_ELSE ≡ λpab.p a b ≡ λ λ λ 3 2 1
///
/// # Examples
/// ```
/// use lambda_calculus::data::boolean::{if_else, tru, fls};
/// use lambda_calculus::*;
///
/// assert_eq!(beta(app!(if_else(), tru(), tru(), fls()), NOR, 0), tru());
/// assert_eq!(beta(app!(if_else(), fls(), tru(), fls()), NOR, 0), fls());
/// ```
pub fn if_else() -> Term {
abs!(3, app!(Var(3), Var(2), Var(1)))
}
/// Applied to two lambda-encoded booleans it returns their lambda-encoded implication.
///
/// IMPLY ≡ λpq.OR (NOT p) q ≡ λ λ OR (NOT 2) 1
///
/// # Examples
/// ```
/// use lambda_calculus::data::boolean::{imply, tru, fls};
/// use lambda_calculus::*;
///
/// assert_eq!(beta(app!(imply(), tru(), tru()), NOR, 0), tru());
/// assert_eq!(beta(app!(imply(), tru(), fls()), NOR, 0), fls());
/// assert_eq!(beta(app!(imply(), fls(), tru()), NOR, 0), tru());
/// assert_eq!(beta(app!(imply(), fls(), fls()), NOR, 0), tru());
/// ```
pub fn imply() -> Term {
abs!(2, app!(or(), app(not(), Var(2)), Var(1)))
}
impl From<bool> for Term {
fn from(b: bool) -> Term {
if b {
tru()
} else {
fls()
}
}
}<|fim▁end|> | }
/// Applied to a lambda-encoded predicate and two terms it returns the first one if the predicate |
<|file_name|>appengine.settings.ts<|end_file_name|><|fim▁begin|>import type { IProviderSettings } from '@spinnaker/core';
import { SETTINGS } from '@spinnaker/core';
export interface IAppengineProviderSettings extends IProviderSettings {
defaults: {
account?: string;
};
}
export const AppengineProviderSettings: IAppengineProviderSettings = (SETTINGS.providers
.appengine as IAppengineProviderSettings) || { defaults: {} };<|fim▁hole|> AppengineProviderSettings.resetToOriginal = SETTINGS.resetProvider('appengine');
}<|fim▁end|> | if (AppengineProviderSettings) { |
<|file_name|>find.dutch.test.ts<|end_file_name|><|fim▁begin|>import { findWord, PartialFindOptions, FindFullResult } from './find';
import * as fs from 'fs-extra';
import * as zlib from 'zlib';
import { importTrie } from './importExport';
import { TrieNode } from './TrieNode';
import * as path from 'path';
import { normalizeWordToLowercase } from './util';
const dutchDictionary = path.join(__dirname, ...'../../../Samples/dicts/nl_compound_trie3.trie.gz'.split('/'));
describe('Validate findWord', () => {
const pTrie = readTrie(dutchDictionary);
test('find exact words preserve case', async () => {
const trie = await pTrie;
// cspell:ignore aanvaardbaard
// Code is not allowed as a full word.
expect(
findWord(trie, 'aanvaardbaard', {
matchCase: true,
compoundMode: 'none',
})
).toEqual({ ...frFound(false), forbidden: true });
expect(findWord(trie, 'code', { matchCase: true, compoundMode: 'none' })).toEqual({
found: 'code',
compoundUsed: false,
forbidden: false,
caseMatched: true,
});
expect(
findWord(trie, 'code', {
matchCase: true,
compoundMode: 'compound',
})
).toEqual(frFound('code'));
});
const tests: [string, PartialFindOptions, FindFullResult][] = [
['Code', { matchCase: true, compoundMode: 'none' }, frNotFound({ forbidden: false })],
['code', { matchCase: true, compoundMode: 'none' }, frFound('code', { forbidden: false })],
['cafe', { matchCase: true, compoundMode: 'none' }, frNotFound({ forbidden: false })],
[
'cafe',
{ matchCase: false, compoundMode: 'none' },
frFound('cafe', { caseMatched: false, forbidden: undefined }),
],
// Compounding enabled, but matching whole words (compounding not used).
['Code', { matchCase: true, compoundMode: 'compound' }, frCompoundFound(false)],
['code', { matchCase: true, compoundMode: 'compound' }, frFound('code')],
['cafe', { matchCase: true, compoundMode: 'compound' }, frFound(false)],
['cafe', { matchCase: false, compoundMode: 'compound' }, frFound('cafe', { caseMatched: false })],
// compound words
testCompound('buurtbewoner'), // cspell:ignore buurtbewoner
testCompound('buurtbewoners'), // cspell:ignore buurtbewoners
// forbidden compounds
[
'aanvaardbaard',
{ matchCase: true, compoundMode: 'compound' },
frCompoundFound('aanvaardbaard', { forbidden: true }),
],
];
test.each(tests)('%s %j %j', async (word, options, exResult) => {
const trie = await pTrie;
expect(findWord(trie, word, options)).toEqual(exResult);
});
test.each(sampleWords())('Find Word: %s', async (word) => {
const trie = await pTrie;
const word2 = word[0].toLowerCase() + word.slice(1);
const r1 = findWord(trie, word, {
matchCase: true,
compoundMode: 'compound',
});
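        // fall back to the word with its first letter lower-cased when the exact form is not found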
const r2 =
r1.found || word === word2
? r1
: ((word = word2),
findWord(trie, word, {
matchCase: true,
compoundMode: 'compound',
}));
expect(r2.found).toEqual(word);
expect(r2.forbidden).toBeFalsy();
});
test.each(sampleWords())('Find Word case insensitive: %s', async (word) => {
const trie = await pTrie;
const r = findWord(trie, normalizeWordToLowercase(word), {
matchCase: false,
compoundMode: 'compound',
});
expect(r.found).toEqual(normalizeWordToLowercase(word));
expect(r.forbidden).toBeFalsy();
});
test.each(sampleMisspellings())(`Check misspelled words: %s`, async (word) => {
const trie = await pTrie;
const word2 = word[0].toLowerCase() + word.slice(1);
const r1 = findWord(trie, word, {
matchCase: true,
compoundMode: 'compound',
});
const r2 =
r1.found || word === word2
? r1
: ((word = word2),
findWord(trie, word, {
matchCase: true,
compoundMode: 'compound',
}));
expect(r2.found).toEqual(false);
expect(r2.forbidden).toBeFalsy();
});
test.each(sampleMisspellings())(`Check misspelled words case insensitive: %s`, async (word) => {
const trie = await pTrie;
const r = findWord(trie, normalizeWordToLowercase(word), {
matchCase: false,
compoundMode: 'compound',
});
expect(r.found).toEqual(false);
expect(r.forbidden).toBeFalsy();
});
});
function sampleMisspellings(): string[] {
// cspell:disable
const text = `
nieuwjaarnacht
burgersmeester
buurtsbewoners
herdenkingbijeenkomst
pankoekhuis
blauwetram
`;
// cspell:enable
return processText(text);
}
function sampleWords(): string[] {
// cspell:disable
const text = `
Arnhem basisschool burgemeester buurtbewoners haarvaten herdenkingsbijeenkomst
nabestaanden onmenselijke slachtoffers uitgebrande verdachten voorbereiden
exposé
De Australische marine heeft honderden inwoners en toeristen uit de kustplaats geëvacueerd
zo'n mensen vluchtten maandagavond naar het strand toen bosbranden het dorp deels in de as legden en de
vluchtwegen blokkeerden.
In het zuidoosten van Australië zijn meer dan 200 brandhaarden.
De autoriteiten vrezen dat de situatie alleen maar erger wordt door de hoge
temperaturen en harde wind die voor dit weekend worden verwacht.
In de deelstaat New Zuid Wales, waar Sydney ligt, geldt de noodtoestand.
Het Nederlandse ministerie van Buitenlandse Zaken adviseert in het gebied alleen noodzakelijke reizen te maken.
Nooit eerder waren de jaarlijkse bosbranden in Australië zo ernstig.
Tot nu toe is een gebied groter dan Nederland afgebrand en zijn meer dan 1400 huizen verwoest.
Ten minste negentien mensen kwamen om en er zijn tientallen vermisten.
Verdachten flatbrand Arnhem hebben ook levenslang, zegt Kinderombudsman<|fim▁hole|> Meervoudige persoonlijkheidsstoornissen
Zandzeep mineraalwatersteenstralen
Randjongerenhangplekkenbeleidsambtenarensalarisbesprekingsafspraken
Invaliditeitsuitkeringshoofdkwartiervestigingsgebouwfundamentenblauwdruk
Hottentottententententoonstellingsterrein
Vervoerdersaansprakelijkheidsverzekering
Bestuurdersaansprakelijkheidsverzekering
Overeenstemmingsbeoordelingsprocedures
`;
// cspell:enable
return processText(text);
}
function processText(text: string): string[] {
return [
...new Set(
text
.replace(/[.0-9,"“():]/g, ' ')
.split(/\s+/)
.sort()
.filter((a) => !!a)
),
];
}
function testCompound(word: string, found = true): [string, PartialFindOptions, FindFullResult] {
return [word, { matchCase: true, compoundMode: 'compound' }, frCompoundFound(found && word, { forbidden: false })];
}
type PartialFindFullResult = Partial<FindFullResult>;
function fr({
found = false,
forbidden = undefined,
compoundUsed = false,
caseMatched = true,
}: PartialFindFullResult): FindFullResult {
return {
found,
forbidden,
compoundUsed,
caseMatched,
};
}
function frNotFound(r: PartialFindFullResult = {}): FindFullResult {
const { found = false } = r;
return fr({ ...r, found });
}
function frFound(found: string | false, r: PartialFindFullResult = {}): FindFullResult {
return fr({
...r,
found,
});
}
function frCompoundFound(found: string | false, r: PartialFindFullResult = {}): FindFullResult {
const { compoundUsed = true } = r;
return frFound(found, { ...r, compoundUsed });
}
async function readTrie(filename: string): Promise<TrieNode> {
const lines = await readTextFile(filename);
return importTrie(lines);
}
function readTextFile(filename: string): Promise<string[]> {
const lines = fs
.readFile(filename)
.then((buffer) => (/\.gz$/.test(filename) ? zlib.gunzipSync(buffer) : buffer))
.then((buffer) => buffer.toString('utf8'))
.then((content) => content.split(/\r?\n/g));
return lines;
}<|fim▁end|> |
Lange woorden:
Kindercarnavalsoptochtenvoorbereidingswerkzaamheden |
<|file_name|>hashing_analyzer.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Hashing analyzer."""
import unittest
from plaso.containers import analyzer_result
from plaso.analyzers import hashing_analyzer
from plaso.analyzers.hashers import manager
from tests import test_lib as shared_test_lib
from tests.analyzers.hashers import manager as manager_test
class HashingAnalyzerTest(shared_test_lib.BaseTestCase):
"""Test the Hashing analyzer."""
# pylint: disable=protected-access
@classmethod
def setUpClass(cls):
"""Makes preparations before running any of the tests."""
manager.HashersManager.RegisterHasher(manager_test.TestHasher)
@classmethod
def tearDownClass(cls):
"""Cleans up after running all tests."""
manager.HashersManager.DeregisterHasher(manager_test.TestHasher)
def testHasherInitialization(self):<|fim▁hole|> """Test the creation of the analyzer, and the enabling of hashers."""
analyzer = hashing_analyzer.HashingAnalyzer()
analyzer.SetHasherNames('testhash')
self.assertEqual(len(analyzer._hashers), 1)
def testHashFile(self):
"""Tests that results are produced correctly."""
analyzer = hashing_analyzer.HashingAnalyzer()
analyzer.SetHasherNames('testhash')
analyzer.Analyze('test data')
results = analyzer.GetResults()
first_result = results[0]
self.assertIsInstance(first_result, analyzer_result.AnalyzerResult)
self.assertEqual(first_result.analyzer_name, 'hashing')
self.assertEqual(first_result.attribute_name, 'testhash_hash')
self.assertEqual(first_result.attribute_value, '4')
self.assertEqual(len(results), 1)
if __name__ == '__main__':
unittest.main()<|fim▁end|> | |
<|file_name|>PolyhedraRenderer.js<|end_file_name|><|fim▁begin|>Clazz.declarePackage ("J.renderspecial");
Clazz.load (["J.render.ShapeRenderer"], "J.renderspecial.PolyhedraRenderer", ["JU.P3i", "JM.Atom", "JU.C"], function () {
c$ = Clazz.decorateAsClass (function () {
this.drawEdges = 0;
this.isAll = false;
this.frontOnly = false;
this.screens = null;
this.vibs = false;
Clazz.instantialize (this, arguments);
}, J.renderspecial, "PolyhedraRenderer", J.render.ShapeRenderer);
Clazz.overrideMethod (c$, "render",
function () {
var polyhedra = this.shape;
var polyhedrons = polyhedra.polyhedrons;
this.drawEdges = polyhedra.drawEdges;
this.g3d.addRenderer (1073742182);
this.vibs = (this.ms.vibrations != null && this.tm.vibrationOn);
var needTranslucent = false;
for (var i = polyhedra.polyhedronCount; --i >= 0; ) if (polyhedrons[i].isValid && this.render1 (polyhedrons[i])) needTranslucent = true;
return needTranslucent;
});
Clazz.defineMethod (c$, "render1",
function (p) {
if (p.visibilityFlags == 0) return false;
var colixes = (this.shape).colixes;
var iAtom = p.centralAtom.i;
var colix = (colixes == null || iAtom >= colixes.length ? 0 : colixes[iAtom]);
colix = JU.C.getColixInherited (colix, p.centralAtom.colixAtom);
var needTranslucent = false;
if (JU.C.renderPass2 (colix)) {
needTranslucent = true;
} else if (!this.g3d.setC (colix)) {
return false;
}var vertices = p.vertices;
var planes;
if (this.screens == null || this.screens.length < vertices.length) {
this.screens = new Array (vertices.length);
for (var i = vertices.length; --i >= 0; ) this.screens[i] = new JU.P3i ();
}planes = p.planes;
for (var i = vertices.length; --i >= 0; ) {
<|fim▁hole|>if (atom == null) {
this.tm.transformPtScr (vertices[i], this.screens[i]);
} else if (!atom.isVisible (this.myVisibilityFlag)) {
this.screens[i].setT (this.vibs && atom.hasVibration () ? this.tm.transformPtVib (atom, this.ms.vibrations[atom.i]) : this.tm.transformPt (atom));
} else {
this.screens[i].set (atom.sX, atom.sY, atom.sZ);
}}
this.isAll = (this.drawEdges == 1);
this.frontOnly = (this.drawEdges == 2);
if (!needTranslucent || this.g3d.setC (colix)) for (var i = 0, j = 0; j < planes.length; ) this.fillFace (p.normixes[i++], this.screens[planes[j++]], this.screens[planes[j++]], this.screens[planes[j++]]);
if (p.colixEdge != 0) colix = p.colixEdge;
if (this.g3d.setC (JU.C.getColixTranslucent3 (colix, false, 0))) for (var i = 0, j = 0; j < planes.length; ) this.drawFace (p.normixes[i++], this.screens[planes[j++]], this.screens[planes[j++]], this.screens[planes[j++]]);
return needTranslucent;
}, "J.shapespecial.Polyhedron");
Clazz.defineMethod (c$, "drawFace",
function (normix, A, B, C) {
if (this.isAll || this.frontOnly && this.vwr.gdata.isDirectedTowardsCamera (normix)) {
this.drawCylinderTriangle (A.x, A.y, A.z, B.x, B.y, B.z, C.x, C.y, C.z);
}}, "~N,JU.P3i,JU.P3i,JU.P3i");
Clazz.defineMethod (c$, "drawCylinderTriangle",
function (xA, yA, zA, xB, yB, zB, xC, yC, zC) {
var d = (this.g3d.isAntialiased () ? 6 : 3);
this.g3d.fillCylinderScreen (3, d, xA, yA, zA, xB, yB, zB);
this.g3d.fillCylinderScreen (3, d, xB, yB, zB, xC, yC, zC);
this.g3d.fillCylinderScreen (3, d, xA, yA, zA, xC, yC, zC);
}, "~N,~N,~N,~N,~N,~N,~N,~N,~N");
Clazz.defineMethod (c$, "fillFace",
function (normix, A, B, C) {
this.g3d.fillTriangleTwoSided (normix, A.x, A.y, A.z, B.x, B.y, B.z, C.x, C.y, C.z);
}, "~N,JU.P3i,JU.P3i,JU.P3i");
});<|fim▁end|> | var atom = (Clazz.instanceOf (vertices[i], JM.Atom) ? vertices[i] : null);
|
<|file_name|>InsertionSort.java<|end_file_name|><|fim▁begin|>package tutorialHorizon.arrays;
/**
* Created by archithrapaka on 7/4/17.
*/
public class InsertionSort {
public static void insertionSort(int[] items, int n) {
int i, j;
for (i = 1; i < n; i++) {
j = i;
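            // swap the new element leftwards until it is no smaller than its predecessor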
while (j > 0 && (items[j] < items[j - 1])) {
swap(items, j, j - 1);
j--;
}
}
}
public static void swap(int[] items, int i, int j) {
int temp = items[i];<|fim▁hole|> }
public static void display(int[] a) {
for (int i : a) {
System.out.print(i + " ");
}
}
public static void main(String[] args) {
int[] a = {100, 4, 30, 15, 98, 3};
insertionSort(a, a.length);
display(a);
}
}<|fim▁end|> | items[i] = items[j];
items[j] = temp; |
<|file_name|>unused-macro-with-bad-frag-spec.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.<|fim▁hole|>// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unused_macros)]
// Issue #21370
macro_rules! test {
($wrong:t_ty) => () //~ ERROR invalid fragment specifier `t_ty`
}
fn main() { }<|fim▁end|> | // |
<|file_name|>DdosProtectionPlanTests.java<|end_file_name|><|fim▁begin|>// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.resourcemanager.network;
<|fim▁hole|>import com.azure.resourcemanager.test.utils.TestUtilities;
import com.azure.core.management.Region;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
public class DdosProtectionPlanTests extends NetworkManagementTest {
@Test
public void canCRUDDdosProtectionPlan() throws Exception {
String ppName = generateRandomResourceName("ddosplan", 15);
DdosProtectionPlan pPlan =
networkManager
.ddosProtectionPlans()
.define(ppName)
.withRegion(locationOrDefault(Region.US_SOUTH_CENTRAL))
.withNewResourceGroup(rgName)
.withTag("tag1", "value1")
.create();
Assertions.assertEquals("value1", pPlan.tags().get("tag1"));
PagedIterable<DdosProtectionPlan> ppList = networkManager.ddosProtectionPlans().list();
Assertions.assertTrue(TestUtilities.getSize(ppList) > 0);
ppList = networkManager.ddosProtectionPlans().listByResourceGroup(rgName);
Assertions.assertTrue(TestUtilities.getSize(ppList) > 0);
networkManager.ddosProtectionPlans().deleteById(pPlan.id());
ppList = networkManager.ddosProtectionPlans().listByResourceGroup(rgName);
Assertions.assertTrue(TestUtilities.isEmpty(ppList));
}
}<|fim▁end|> | import com.azure.core.http.rest.PagedIterable;
import com.azure.resourcemanager.network.models.DdosProtectionPlan; |
<|file_name|>net.rs<|end_file_name|><|fim▁begin|>//! A collection of traits abstracting over Listeners and Streams.
use std::any::{Any, TypeId};
use std::fmt;
use std::io::{self, ErrorKind, Read, Write};
use std::net::{SocketAddr, ToSocketAddrs, TcpStream, TcpListener, Shutdown};
use std::mem;
#[cfg(feature = "openssl")]
pub use self::openssl::Openssl;
use typeable::Typeable;
use traitobject;
/// The write-status indicating headers have not been written.
pub enum Fresh {}
/// The write-status indicating headers have been written.
pub enum Streaming {}
/// An abstraction to listen for connections on a certain port.
pub trait NetworkListener: Clone {
/// The stream produced for each connection.
type Stream: NetworkStream + Send + Clone;
/// Listens on a socket.
//fn listen<To: ToSocketAddrs>(&mut self, addr: To) -> io::Result<Self::Acceptor>;
/// Returns an iterator of streams.
fn accept(&mut self) -> ::Result<Self::Stream>;
/// Get the address this Listener ended up listening on.
fn local_addr(&mut self) -> io::Result<SocketAddr>;
/// Closes the Acceptor, so no more incoming connections will be handled.
// fn close(&mut self) -> io::Result<()>;
/// Returns an iterator over incoming connections.
fn incoming(&mut self) -> NetworkConnections<Self> {
NetworkConnections(self)
}
}
/// An iterator wrapper over a NetworkAcceptor.
pub struct NetworkConnections<'a, N: NetworkListener + 'a>(&'a mut N);
impl<'a, N: NetworkListener + 'a> Iterator for NetworkConnections<'a, N> {
type Item = ::Result<N::Stream>;
fn next(&mut self) -> Option<::Result<N::Stream>> {
Some(self.0.accept())
}
}
/// An abstraction over streams that a Server can utilize.
pub trait NetworkStream: Read + Write + Any + Send + Typeable {
/// Get the remote address of the underlying connection.
fn peer_addr(&mut self) -> io::Result<SocketAddr>;
/// This will be called when Stream should no longer be kept alive.
#[inline]
fn close(&mut self, _how: Shutdown) -> io::Result<()> {
Ok(())
}
}
/// A connector creates a NetworkStream.
pub trait NetworkConnector {
/// Type of Stream to create
type Stream: Into<Box<NetworkStream + Send>>;
/// Connect to a remote address.
fn connect(&self, host: &str, port: u16, scheme: &str) -> ::Result<Self::Stream>;
}
impl<T: NetworkStream + Send> From<T> for Box<NetworkStream + Send> {
fn from(s: T) -> Box<NetworkStream + Send> {
Box::new(s)
}
}
impl fmt::Debug for Box<NetworkStream + Send> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.pad("Box<NetworkStream>")
}
}
impl NetworkStream + Send {
unsafe fn downcast_ref_unchecked<T: 'static>(&self) -> &T {
mem::transmute(traitobject::data(self))
}
unsafe fn downcast_mut_unchecked<T: 'static>(&mut self) -> &mut T {
mem::transmute(traitobject::data_mut(self))
}
unsafe fn downcast_unchecked<T: 'static>(self: Box<NetworkStream + Send>) -> Box<T> {
let raw: *mut NetworkStream = mem::transmute(self);
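        // traitobject::data_mut extracts the data pointer (dropping the vtable), which is then reinterpreted as Box<T>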
mem::transmute(traitobject::data_mut(raw))
}
}
impl NetworkStream + Send {
/// Is the underlying type in this trait object a T?
#[inline]
pub fn is<T: Any>(&self) -> bool {
(*self).get_type() == TypeId::of::<T>()
}
/// If the underlying type is T, get a reference to the contained data.
#[inline]
pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
if self.is::<T>() {
Some(unsafe { self.downcast_ref_unchecked() })
} else {
None
}
}
/// If the underlying type is T, get a mutable reference to the contained
/// data.
#[inline]
pub fn downcast_mut<T: Any>(&mut self) -> Option<&mut T> {
if self.is::<T>() {
Some(unsafe { self.downcast_mut_unchecked() })
} else {
None
}
}
/// If the underlying type is T, extract it.
#[inline]
pub fn downcast<T: Any>(self: Box<NetworkStream + Send>)
-> Result<Box<T>, Box<NetworkStream + Send>> {
if self.is::<T>() {
Ok(unsafe { self.downcast_unchecked() })
} else {
Err(self)
}
}
}
/// A `NetworkListener` for `HttpStream`s.
pub struct HttpListener(TcpListener);
impl Clone for HttpListener {
#[inline]
fn clone(&self) -> HttpListener {
HttpListener(self.0.try_clone().unwrap())
}
}
impl HttpListener {
/// Start listening to an address over HTTP.
pub fn new<To: ToSocketAddrs>(addr: To) -> ::Result<HttpListener> {
Ok(HttpListener(try!(TcpListener::bind(addr))))
}
}
impl NetworkListener for HttpListener {
type Stream = HttpStream;
#[inline]
fn accept(&mut self) -> ::Result<HttpStream> {
Ok(HttpStream(try!(self.0.accept()).0))
}
#[inline]
fn local_addr(&mut self) -> io::Result<SocketAddr> {
self.0.local_addr()
}
}
/// A wrapper around a TcpStream.
pub struct HttpStream(pub TcpStream);
impl Clone for HttpStream {
#[inline]
fn clone(&self) -> HttpStream {
HttpStream(self.0.try_clone().unwrap())
}
}
impl fmt::Debug for HttpStream {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("HttpStream(_)")
}
}
impl Read for HttpStream {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.0.read(buf)
}
}
impl Write for HttpStream {
#[inline]
fn write(&mut self, msg: &[u8]) -> io::Result<usize> {
self.0.write(msg)
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
self.0.flush()
}
}
#[cfg(windows)]
impl ::std::os::windows::io::AsRawSocket for HttpStream {
fn as_raw_socket(&self) -> ::std::os::windows::io::RawSocket {
self.0.as_raw_socket()
}
}
#[cfg(unix)]
impl ::std::os::unix::io::AsRawFd for HttpStream {
fn as_raw_fd(&self) -> i32 {
self.0.as_raw_fd()
}
}
impl NetworkStream for HttpStream {
#[inline]
fn peer_addr(&mut self) -> io::Result<SocketAddr> {
self.0.peer_addr()
}
#[inline]
fn close(&mut self, how: Shutdown) -> io::Result<()> {
match self.0.shutdown(how) {
Ok(_) => Ok(()),
// see https://github.com/hyperium/hyper/issues/508
Err(ref e) if e.kind() == ErrorKind::NotConnected => Ok(()),
err => err
}
}
}
/// A connector that will produce HttpStreams.
#[derive(Debug, Clone, Default)]
pub struct HttpConnector;
impl NetworkConnector for HttpConnector {
type Stream = HttpStream;
fn connect(&self, host: &str, port: u16, scheme: &str) -> ::Result<HttpStream> {
let addr = &(host, port);
Ok(try!(match scheme {
"http" => {
debug!("http scheme");
Ok(HttpStream(try!(TcpStream::connect(addr))))
},
_ => {
Err(io::Error::new(io::ErrorKind::InvalidInput,
"Invalid scheme for Http"))
}
}))
}
}
/// An abstraction to allow any SSL implementation to be used with HttpsStreams.
pub trait Ssl {
/// The protected stream.
type Stream: NetworkStream + Send + Clone;
/// Wrap a client stream with SSL.
fn wrap_client(&self, stream: HttpStream, host: &str) -> ::Result<Self::Stream>;
/// Wrap a server stream with SSL.
fn wrap_server(&self, stream: HttpStream) -> ::Result<Self::Stream>;
}
/// A stream over the HTTP protocol, possibly protected by SSL.
#[derive(Debug, Clone)]
pub enum HttpsStream<S: NetworkStream> {
/// A plain text stream.
Http(HttpStream),
/// A stream protected by SSL.
Https(S)
}
impl<S: NetworkStream> Read for HttpsStream<S> {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match *self {
HttpsStream::Http(ref mut s) => s.read(buf),
HttpsStream::Https(ref mut s) => s.read(buf)
}
}
}
impl<S: NetworkStream> Write for HttpsStream<S> {
#[inline]
fn write(&mut self, msg: &[u8]) -> io::Result<usize> {
match *self {
HttpsStream::Http(ref mut s) => s.write(msg),
HttpsStream::Https(ref mut s) => s.write(msg)
}
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
match *self {
HttpsStream::Http(ref mut s) => s.flush(),
HttpsStream::Https(ref mut s) => s.flush()
}
}
}
impl<S: NetworkStream> NetworkStream for HttpsStream<S> {
#[inline]
fn peer_addr(&mut self) -> io::Result<SocketAddr> {<|fim▁hole|> }
}
#[inline]
fn close(&mut self, how: Shutdown) -> io::Result<()> {
match *self {
HttpsStream::Http(ref mut s) => s.close(how),
HttpsStream::Https(ref mut s) => s.close(how)
}
}
}
/// A Http Listener over SSL.
#[derive(Clone)]
pub struct HttpsListener<S: Ssl> {
listener: HttpListener,
ssl: S,
}
impl<S: Ssl> HttpsListener<S> {
/// Start listening to an address over HTTPS.
pub fn new<To: ToSocketAddrs>(addr: To, ssl: S) -> ::Result<HttpsListener<S>> {
HttpListener::new(addr).map(|l| HttpsListener {
listener: l,
ssl: ssl
})
}
}
impl<S: Ssl + Clone> NetworkListener for HttpsListener<S> {
type Stream = S::Stream;
#[inline]
fn accept(&mut self) -> ::Result<S::Stream> {
self.listener.accept().and_then(|s| self.ssl.wrap_server(s))
}
#[inline]
fn local_addr(&mut self) -> io::Result<SocketAddr> {
self.listener.local_addr()
}
}
/// A connector that can protect HTTP streams using SSL.
#[derive(Debug, Default)]
pub struct HttpsConnector<S: Ssl> {
ssl: S
}
impl<S: Ssl> HttpsConnector<S> {
/// Create a new connector using the provided SSL implementation.
pub fn new(s: S) -> HttpsConnector<S> {
HttpsConnector { ssl: s }
}
}
impl<S: Ssl> NetworkConnector for HttpsConnector<S> {
type Stream = HttpsStream<S::Stream>;
fn connect(&self, host: &str, port: u16, scheme: &str) -> ::Result<Self::Stream> {
let addr = &(host, port);
if scheme == "https" {
debug!("https scheme");
let stream = HttpStream(try!(TcpStream::connect(addr)));
self.ssl.wrap_client(stream, host).map(HttpsStream::Https)
} else {
HttpConnector.connect(host, port, scheme).map(HttpsStream::Http)
}
}
}
#[cfg(not(feature = "openssl"))]
#[doc(hidden)]
pub type DefaultConnector = HttpConnector;
#[cfg(feature = "openssl")]
#[doc(hidden)]
pub type DefaultConnector = HttpsConnector<self::openssl::Openssl>;
#[cfg(feature = "openssl")]
mod openssl {
use std::io;
use std::net::{SocketAddr, Shutdown};
use std::path::Path;
use std::sync::Arc;
use openssl::ssl::{Ssl, SslContext, SslStream, SslMethod, SSL_VERIFY_NONE};
use openssl::ssl::error::StreamError as SslIoError;
use openssl::ssl::error::SslError;
use openssl::x509::X509FileType;
use super::{NetworkStream, HttpStream};
/// An implementation of `Ssl` for OpenSSL.
///
/// # Example
///
/// ```no_run
/// use hyper::Server;
/// use hyper::net::Openssl;
///
/// let ssl = Openssl::with_cert_and_key("/home/foo/cert", "/home/foo/key").unwrap();
/// Server::https("0.0.0.0:443", ssl).unwrap();
/// ```
///
/// For complete control, create a `SslContext` with the options you desire
/// and then create `Openssl { context: ctx }`.
#[derive(Debug, Clone)]
pub struct Openssl {
/// The `SslContext` from openssl crate.
pub context: Arc<SslContext>
}
impl Default for Openssl {
fn default() -> Openssl {
Openssl {
context: Arc::new(SslContext::new(SslMethod::Sslv23).unwrap_or_else(|e| {
// if we cannot create a SslContext, that's because of a
// serious problem. just crash.
panic!("{}", e)
}))
}
}
}
impl Openssl {
/// Ease creating an `Openssl` with a certificate and key.
pub fn with_cert_and_key<C, K>(cert: C, key: K) -> Result<Openssl, SslError>
where C: AsRef<Path>, K: AsRef<Path> {
let mut ctx = try!(SslContext::new(SslMethod::Sslv23));
try!(ctx.set_cipher_list("DEFAULT"));
try!(ctx.set_certificate_file(cert.as_ref(), X509FileType::PEM));
try!(ctx.set_private_key_file(key.as_ref(), X509FileType::PEM));
ctx.set_verify(SSL_VERIFY_NONE, None);
Ok(Openssl { context: Arc::new(ctx) })
}
}
impl super::Ssl for Openssl {
type Stream = SslStream<HttpStream>;
fn wrap_client(&self, stream: HttpStream, host: &str) -> ::Result<Self::Stream> {
let ssl = try!(Ssl::new(&self.context));
try!(ssl.set_hostname(host));
SslStream::connect(ssl, stream).map_err(From::from)
}
fn wrap_server(&self, stream: HttpStream) -> ::Result<Self::Stream> {
match SslStream::accept(&*self.context, stream) {
Ok(ssl_stream) => Ok(ssl_stream),
Err(SslIoError(e)) => {
Err(io::Error::new(io::ErrorKind::ConnectionAborted, e).into())
},
Err(e) => Err(e.into())
}
}
}
impl<S: NetworkStream> NetworkStream for SslStream<S> {
#[inline]
fn peer_addr(&mut self) -> io::Result<SocketAddr> {
self.get_mut().peer_addr()
}
fn close(&mut self, how: Shutdown) -> io::Result<()> {
self.get_mut().close(how)
}
}
}
#[cfg(test)]
mod tests {
use mock::MockStream;
use super::{NetworkStream};
#[test]
fn test_downcast_box_stream() {
// FIXME: Use Type ascription
let stream: Box<NetworkStream + Send> = Box::new(MockStream::new());
let mock = stream.downcast::<MockStream>().ok().unwrap();
assert_eq!(mock, Box::new(MockStream::new()));
}
#[test]
fn test_downcast_unchecked_box_stream() {
// FIXME: Use Type ascription
let stream: Box<NetworkStream + Send> = Box::new(MockStream::new());
let mock = unsafe { stream.downcast_unchecked::<MockStream>() };
assert_eq!(mock, Box::new(MockStream::new()));
}
}<|fim▁end|> | match *self {
HttpsStream::Http(ref mut s) => s.peer_addr(),
HttpsStream::Https(ref mut s) => s.peer_addr() |
<|file_name|>line_modification.cc<|end_file_name|><|fim▁begin|>#include "line_modification.hh"
#include "buffer.hh"
#include "unit_tests.hh"
namespace Kakoune
{
static LineModification make_line_modif(const Buffer::Change& change)
{
LineCount num_added = 0, num_removed = 0;
if (change.type == Buffer::Change::Insert)
{
num_added = change.end.line - change.begin.line;
// inserted a new line at buffer end but end coord is on same line
if (change.at_end and change.end.column != 0)
++num_added;
}
else
{
num_removed = change.end.line - change.begin.line;
// removed last line, but end coord is on same line
if (change.at_end and change.end.column != 0)
++num_removed;
}
// modified a line
if (not change.at_end and
(change.begin.column != 0 or change.end.column != 0))
{
++num_removed;
++num_added;
}
return { change.begin.line, change.begin.line, num_removed, num_added };
}
Vector<LineModification> compute_line_modifications(const Buffer& buffer, size_t timestamp)
{
Vector<LineModification> res;
for (auto& buf_change : buffer.changes_since(timestamp))
{
auto change = make_line_modif(buf_change);
auto pos = std::upper_bound(res.begin(), res.end(), change.new_line,
[](const LineCount& l, const LineModification& c)
{ return l < c.new_line; });
if (pos != res.begin())
{
auto& prev = *(pos-1);
if (change.new_line <= prev.new_line + prev.num_added)
{
--pos;
const LineCount removed_from_previously_added_by_pos =
clamp(pos->new_line + pos->num_added - change.new_line,
0_line, std::min(pos->num_added, change.num_removed));
pos->num_removed += change.num_removed - removed_from_previously_added_by_pos;
pos->num_added += change.num_added - removed_from_previously_added_by_pos;
}
else
{
change.old_line -= prev.diff();
pos = res.insert(pos, change);
}
}
else
pos = res.insert(pos, change);
auto next = pos + 1;
auto diff = buf_change.end.line - buf_change.begin.line;
if (buf_change.type == Buffer::Change::Erase)
{
auto delend = std::upper_bound(next, res.end(), change.new_line + change.num_removed,
[](const LineCount& l, const LineModification& c)
{ return l < c.new_line; });
for (auto it = next; it != delend; ++it)
{
const LineCount removed_from_previously_added_by_it =
std::min(it->num_added, change.new_line + change.num_removed - it->new_line);
pos->num_removed += it->num_removed - removed_from_previously_added_by_it;
pos->num_added += it->num_added - removed_from_previously_added_by_it;
}
next = res.erase(next, delend);
for (auto it = next; it != res.end(); ++it)
it->new_line -= diff;
}
else
{
for (auto it = next; it != res.end(); ++it)
it->new_line += diff;
}
}
return res;
}
bool operator==(const LineModification& lhs, const LineModification& rhs)
{
return std::tie(lhs.old_line, lhs.new_line, lhs.num_removed, lhs.num_added) ==
std::tie(rhs.old_line, rhs.new_line, rhs.num_removed, rhs.num_added);
}
UnitTest test_line_modifications{[]()
{
{
Buffer buffer("test", Buffer::Flags::None, "line 1\nline 2\n");
auto ts = buffer.timestamp();
buffer.erase(buffer.iterator_at({1, 0}), buffer.iterator_at({2, 0}));
auto modifs = compute_line_modifications(buffer, ts);
kak_assert(modifs.size() == 1 && modifs[0] == LineModification{ 1 COMMA 1 COMMA 1 COMMA 0 });
}
{
Buffer buffer("test", Buffer::Flags::None, "line 1\nline 2\n");
auto ts = buffer.timestamp();
buffer.insert(buffer.iterator_at({1, 7}), "line 3");
auto modifs = compute_line_modifications(buffer, ts);
kak_assert(modifs.size() == 1 && modifs[0] == LineModification{ 2 COMMA 2 COMMA 0 COMMA 1 });
}
{
Buffer buffer("test", Buffer::Flags::None, "line 1\nline 2\nline 3\n");
auto ts = buffer.timestamp();
buffer.insert(buffer.iterator_at({1, 4}), "hoho\nhehe");
buffer.erase(buffer.iterator_at({0, 0}), buffer.iterator_at({1, 0}));
auto modifs = compute_line_modifications(buffer, ts);
kak_assert(modifs.size() == 1 && modifs[0] == LineModification{ 0 COMMA 0 COMMA 2 COMMA 2 });
}
{
Buffer buffer("test", Buffer::Flags::None, "line 1\nline 2\nline 3\nline 4\n");
auto ts = buffer.timestamp();
buffer.erase(buffer.iterator_at({0,0}), buffer.iterator_at({3,0}));
buffer.insert(buffer.iterator_at({1,0}), "newline 1\nnewline 2\nnewline 3\n");
buffer.erase(buffer.iterator_at({0,0}), buffer.iterator_at({1,0}));
{
auto modifs = compute_line_modifications(buffer, ts);
kak_assert(modifs.size() == 1 && modifs[0] == LineModification{ 0 COMMA 0 COMMA 4 COMMA 3 });
}
buffer.insert(buffer.iterator_at({3,0}), "newline 4\n");
{
auto modifs = compute_line_modifications(buffer, ts);
kak_assert(modifs.size() == 1 && modifs[0] == LineModification{ 0 COMMA 0 COMMA 4 COMMA 4 });
}
}
{
Buffer buffer("test", Buffer::Flags::None, "line 1\n");
auto ts = buffer.timestamp();
buffer.insert(buffer.iterator_at({0,0}), "n");
buffer.insert(buffer.iterator_at({0,1}), "e");
buffer.insert(buffer.iterator_at({0,2}), "w");
auto modifs = compute_line_modifications(buffer, ts);
kak_assert(modifs.size() == 1 && modifs[0] == LineModification{ 0 COMMA 0 COMMA 1 COMMA 1 });<|fim▁hole|>}};
}<|fim▁end|> | } |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import sys
import warnings
try:
# Use setuptools if available, for install_requires (among other things).
import setuptools
from setuptools import setup
except ImportError:
setuptools = None
from distutils.core import setup
# The following code is copied from
# https://github.com/mongodb/mongo-python-driver/blob/master/setup.py
# to support installing without the extension on platforms where
# no compiler is available.
from distutils.command.build_ext import build_ext
from distutils.errors import CCompilerError
from distutils.errors import DistutilsPlatformError, DistutilsExecError
if sys.platform == 'win32' and sys.version_info > (2, 6):
# 2.6's distutils.msvc9compiler can raise an IOError when failing to
# find the compiler
build_errors = (CCompilerError, DistutilsExecError,
DistutilsPlatformError, IOError)
else:
build_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
class custom_build_ext(build_ext):
"""Allow C extension building to fail.
The C extension speeds up websocket masking, but is not essential.
"""
warning_message = """
********************************************************************
WARNING: %s could not
be compiled. No C extensions are essential for Tornado to run,
although they do result in significant speed improvements for
websockets.
%s
Here are some hints for popular operating systems:
If you are seeing this message on Linux you probably need to
install GCC and/or the Python development package for your<|fim▁hole|>version of Python.
Debian and Ubuntu users should issue the following command:
$ sudo apt-get install build-essential python-dev
RedHat, CentOS, and Fedora users should issue the following command:
$ sudo yum install gcc python-devel
If you are seeing this message on OSX please read the documentation
here:
http://api.mongodb.org/python/current/installation.html#osx
********************************************************************
"""
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError:
e = sys.exc_info()[1]
sys.stdout.write('%s\n' % str(e))
warnings.warn(self.warning_message % ("Extension modules",
"There was an issue with "
"your platform configuration"
" - see above."))
def build_extension(self, ext):
name = ext.name
if sys.version_info[:3] >= (2, 4, 0):
try:
build_ext.build_extension(self, ext)
except build_errors:
e = sys.exc_info()[1]
sys.stdout.write('%s\n' % str(e))
warnings.warn(self.warning_message % ("The %s extension "
"module" % (name,),
"The output above "
"this warning shows how "
"the compilation "
"failed."))
else:
warnings.warn(self.warning_message % ("The %s extension "
"module" % (name,),
"Please use Python >= 2.4 "
"to take advantage of the "
"extension."))
kwargs = {}
version = "0.1.3"
with open("README.rst") as f:
kwargs["long_description"] = f.read()
if setuptools is not None:
# If setuptools is not available, you're on your own for dependencies.
install_requires = ["tornado", "celery", "pika"]
kwargs["install_requires"] = install_requires
setup(
name="totoro",
version=version,
packages=["totoro", "totoro.test", "totoro.test.celery_tasks"],
extras_require={
"redis": ["redis", "tornado-redis"]
},
author="Alex Lee",
author_email="[email protected]",
url="https://github.com/Strawhatfy/totoro",
license="http://www.apache.org/licenses/LICENSE-2.0",
description="Celery integration with Tornado",
keywords=['tornado', 'celery', 'amqp', 'redis'],
classifiers=[
'License :: OSI Approved :: Apache Software License',
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: Implementation :: CPython"
],
cmdclass={"build_ext": custom_build_ext},
**kwargs
)<|fim▁end|> | |
<|file_name|>detect.rs<|end_file_name|><|fim▁begin|>/* Copyright (C) 2019 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
// written by Giuseppe Longo <[email protected]>
use crate::core::Direction;
use crate::sip::sip::SIPTransaction;
use std::ptr;
#[no_mangle]
pub unsafe extern "C" fn rs_sip_tx_get_method(
tx: &mut SIPTransaction,
buffer: *mut *const u8,
buffer_len: *mut u32,
) -> u8 {
if let Some(ref r) = tx.request {
let m = &r.method;
if m.len() > 0 {
*buffer = m.as_ptr();
*buffer_len = m.len() as u32;
return 1;
}
}
*buffer = ptr::null();
*buffer_len = 0;
return 0;
}
#[no_mangle]
pub unsafe extern "C" fn rs_sip_tx_get_uri(
tx: &mut SIPTransaction,
buffer: *mut *const u8,
buffer_len: *mut u32,
) -> u8 {
if let Some(ref r) = tx.request {
let p = &r.path;
if p.len() > 0 {
*buffer = p.as_ptr();<|fim▁hole|> }
}
*buffer = ptr::null();
*buffer_len = 0;
return 0;
}
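// Returns the SIP protocol version, taken from the request or the response
// depending on the direction of inspection.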
#[no_mangle]
pub unsafe extern "C" fn rs_sip_tx_get_protocol(
tx: &mut SIPTransaction,
buffer: *mut *const u8,
buffer_len: *mut u32,
direction: u8,
) -> u8 {
match direction.into() {
Direction::ToServer => {
if let Some(ref r) = tx.request {
let v = &r.version;
if v.len() > 0 {
*buffer = v.as_ptr();
*buffer_len = v.len() as u32;
return 1;
}
}
}
Direction::ToClient => {
if let Some(ref r) = tx.response {
let v = &r.version;
if v.len() > 0 {
*buffer = v.as_ptr();
*buffer_len = v.len() as u32;
return 1;
}
}
}
}
*buffer = ptr::null();
*buffer_len = 0;
return 0;
}
#[no_mangle]
pub unsafe extern "C" fn rs_sip_tx_get_stat_code(
tx: &mut SIPTransaction,
buffer: *mut *const u8,
buffer_len: *mut u32,
) -> u8 {
if let Some(ref r) = tx.response {
let c = &r.code;
if c.len() > 0 {
*buffer = c.as_ptr();
*buffer_len = c.len() as u32;
return 1;
}
}
*buffer = ptr::null();
*buffer_len = 0;
return 0;
}
#[no_mangle]
pub unsafe extern "C" fn rs_sip_tx_get_stat_msg(
tx: &mut SIPTransaction,
buffer: *mut *const u8,
buffer_len: *mut u32,
) -> u8 {
if let Some(ref r) = tx.response {
let re = &r.reason;
if re.len() > 0 {
*buffer = re.as_ptr();
*buffer_len = re.len() as u32;
return 1;
}
}
*buffer = ptr::null();
*buffer_len = 0;
return 0;
}
#[no_mangle]
pub unsafe extern "C" fn rs_sip_tx_get_request_line(
tx: &mut SIPTransaction,
buffer: *mut *const u8,
buffer_len: *mut u32,
) -> u8 {
if let Some(ref r) = tx.request_line {
if r.len() > 0 {
*buffer = r.as_ptr();
*buffer_len = r.len() as u32;
return 1;
}
}
*buffer = ptr::null();
*buffer_len = 0;
return 0;
}
#[no_mangle]
pub unsafe extern "C" fn rs_sip_tx_get_response_line(
tx: &mut SIPTransaction,
buffer: *mut *const u8,
buffer_len: *mut u32,
) -> u8 {
if let Some(ref r) = tx.response_line {
if r.len() > 0 {
*buffer = r.as_ptr();
*buffer_len = r.len() as u32;
return 1;
}
}
*buffer = ptr::null();
*buffer_len = 0;
return 0;
}<|fim▁end|> | *buffer_len = p.len() as u32;
return 1; |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>extern crate fall_parse;
extern crate fall_tree;
pub mod arith;
pub mod sexp;
pub mod weird;
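/// Compares the trimmed actual and expected dumps and panics with both
/// texts when they differ.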
pub fn match_ast(actual: &str, expected: &str) {
let actual = actual.trim();
let expected = expected.trim();
if actual != expected {<|fim▁hole|><|fim▁end|> | panic!("Actual:\n{}\nExpected:\n{}\n", actual, expected)
}
} |
<|file_name|>glusterfs.go<|end_file_name|><|fim▁begin|>/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package glusterfs
import (
"fmt"
"math"
"os"
"path"
"runtime"
"strconv"
dstrings "strings"
"sync"
"github.com/golang/glog"
gcli "github.com/heketi/heketi/client/api/go-client"
gapi "github.com/heketi/heketi/pkg/glusterfs/api"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
volutil "k8s.io/kubernetes/pkg/volume/util"
)
// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&glusterfsPlugin{host: nil, gidTable: make(map[string]*MinMaxAllocator)}}
}
type glusterfsPlugin struct {
host volume.VolumeHost
gidTable map[string]*MinMaxAllocator
gidTableLock sync.Mutex
}
var _ volume.VolumePlugin = &glusterfsPlugin{}
var _ volume.PersistentVolumePlugin = &glusterfsPlugin{}
var _ volume.DeletableVolumePlugin = &glusterfsPlugin{}
var _ volume.ProvisionableVolumePlugin = &glusterfsPlugin{}
var _ volume.ExpandableVolumePlugin = &glusterfsPlugin{}
var _ volume.Provisioner = &glusterfsVolumeProvisioner{}
var _ volume.Deleter = &glusterfsVolumeDeleter{}
const (
glusterfsPluginName = "kubernetes.io/glusterfs"
volPrefix = "vol_"
dynamicEpSvcPrefix = "glusterfs-dynamic"
replicaCount = 3
durabilityType = "replicate"
secretKeyName = "key" // key name used in secret
gciLinuxGlusterMountBinaryPath = "/sbin/mount.glusterfs"
defaultGidMin = 2000
defaultGidMax = math.MaxInt32
	// maxCustomEpNamePrefixLen is the maximum number of chars which can be
	// used as an ep/svc name prefix. This number is derived from the
	// formula below:
// max length of name of an ep - length of pvc uuid
// where max length of name of an ep is 63 and length of uuid is 37
maxCustomEpNamePrefixLen = 26
// absoluteGidMin/Max are currently the same as the
// default values, but they play a different role and
// could take a different value. Only thing we need is:
// absGidMin <= defGidMin <= defGidMax <= absGidMax
absoluteGidMin = 2000
absoluteGidMax = math.MaxInt32
linuxGlusterMountBinary = "mount.glusterfs"
heketiAnn = "heketi-dynamic-provisioner"
glusterTypeAnn = "gluster.org/type"
glusterDescAnn = "Gluster-Internal: Dynamically provisioned PV"
heketiVolIDAnn = "gluster.kubernetes.io/heketi-volume-id"
)
func (plugin *glusterfsPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *glusterfsPlugin) GetPluginName() string {
return glusterfsPluginName
}
func (plugin *glusterfsPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
var endpointName string
var endpointsNsPtr *string
volPath, _, err := getVolumeInfo(spec)
if err != nil {
return "", err
}
if spec.Volume != nil && spec.Volume.Glusterfs != nil {
endpointName = spec.Volume.Glusterfs.EndpointsName
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.Glusterfs != nil {
endpointName = spec.PersistentVolume.Spec.Glusterfs.EndpointsName
endpointsNsPtr = spec.PersistentVolume.Spec.Glusterfs.EndpointsNamespace
if endpointsNsPtr != nil && *endpointsNsPtr != "" {
return fmt.Sprintf("%v:%v:%v", endpointName, *endpointsNsPtr, volPath), nil
}
return "", fmt.Errorf("invalid endpointsnamespace in provided glusterfs PV spec")
} else {
return "", fmt.Errorf("unable to fetch required parameters from provided glusterfs spec")
}
return fmt.Sprintf("%v:%v", endpointName, volPath), nil
}
func (plugin *glusterfsPlugin) CanSupport(spec *volume.Spec) bool {
return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Glusterfs != nil) ||
(spec.Volume != nil && spec.Volume.Glusterfs != nil)
}
func (plugin *glusterfsPlugin) RequiresRemount() bool {
return false
}
func (plugin *glusterfsPlugin) SupportsMountOption() bool {
return true
}
func (plugin *glusterfsPlugin) SupportsBulkVolumeVerification() bool {
return false
}
func (plugin *glusterfsPlugin) RequiresFSResize() bool {
return false
}
func (plugin *glusterfsPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
v1.ReadWriteMany,
}
}
func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
epName, epNamespace, err := plugin.getEndpointNameAndNamespace(spec, pod.Namespace)
if err != nil {
return nil, err
}
kubeClient := plugin.host.GetKubeClient()
if kubeClient == nil {
return nil, fmt.Errorf("failed to get kube client to initialize mounter")
}
ep, err := kubeClient.Core().Endpoints(epNamespace).Get(epName, metav1.GetOptions{})
if err != nil {
glog.Errorf("failed to get endpoint %s: %v", epName, err)
return nil, err
}
glog.V(4).Infof("glusterfs pv endpoint %v", ep)
return plugin.newMounterInternal(spec, ep, pod, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *glusterfsPlugin) getEndpointNameAndNamespace(spec *volume.Spec, defaultNamespace string) (string, string, error) {
if spec.Volume != nil && spec.Volume.Glusterfs != nil {
endpoints := spec.Volume.Glusterfs.EndpointsName
if endpoints == "" {
return "", "", fmt.Errorf("no glusterFS endpoint specified")
}
return endpoints, defaultNamespace, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.Glusterfs != nil {
endpoints := spec.PersistentVolume.Spec.Glusterfs.EndpointsName
endpointsNs := defaultNamespace
overriddenNs := spec.PersistentVolume.Spec.Glusterfs.EndpointsNamespace
if overriddenNs != nil {
if len(*overriddenNs) > 0 {
endpointsNs = *overriddenNs
} else {
return "", "", fmt.Errorf("endpointnamespace field set, but no endpointnamespace specified")
}
}
return endpoints, endpointsNs, nil
}
return "", "", fmt.Errorf("Spec does not reference a GlusterFS volume type")
}
func (plugin *glusterfsPlugin) newMounterInternal(spec *volume.Spec, ep *v1.Endpoints, pod *v1.Pod, mounter mount.Interface) (volume.Mounter, error) {
volPath, readOnly, err := getVolumeInfo(spec)
if err != nil {
glog.Errorf("failed to get volumesource : %v", err)
return nil, err
}
return &glusterfsMounter{
glusterfs: &glusterfs{
volName: spec.Name(),
mounter: mounter,
pod: pod,
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(plugin.host.GetPodVolumeDir(pod.UID, strings.EscapeQualifiedNameForDisk(glusterfsPluginName), spec.Name())),
},
hosts: ep,
path: volPath,
readOnly: readOnly,
mountOptions: volutil.MountOptionFromSpec(spec),
}, nil
}
func (plugin *glusterfsPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *glusterfsPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {
return &glusterfsUnmounter{&glusterfs{
volName: volName,
mounter: mounter,
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID}},
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(plugin.host.GetPodVolumeDir(podUID, strings.EscapeQualifiedNameForDisk(glusterfsPluginName), volName)),
}}, nil
}
func (plugin *glusterfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
	// Reconstructing the volume spec would require the endpoint, which cannot
	// be recovered from the mount string, so return an error.
return nil, fmt.Errorf("impossible to reconstruct glusterfs volume spec from volume mountpath")
}
// Glusterfs volumes represent a bare host file or directory mount of an Glusterfs export.
type glusterfs struct {
volName string
pod *v1.Pod
mounter mount.Interface
plugin *glusterfsPlugin
volume.MetricsProvider
}
type glusterfsMounter struct {
*glusterfs
hosts *v1.Endpoints
path string
readOnly bool
mountOptions []string
}
var _ volume.Mounter = &glusterfsMounter{}
func (b *glusterfsMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: b.readOnly,
Managed: false,
SupportsSELinux: false,
}
}
// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *glusterfsMounter) CanMount() error {
exe := b.plugin.host.GetExec(b.plugin.GetPluginName())
switch runtime.GOOS {
case "linux":
if _, err := exe.Run("test", "-x", gciLinuxGlusterMountBinaryPath); err != nil {
return fmt.Errorf("Required binary %s is missing", gciLinuxGlusterMountBinaryPath)
}
}
return nil
}
// SetUp attaches the disk and bind mounts to the volume path.
func (b *glusterfsMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup)
}
func (b *glusterfsMounter) SetUpAt(dir string, fsGroup *int64) error {
notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
glog.V(4).Infof("mount setup: %s %v %v", dir, !notMnt, err)
if err != nil && !os.IsNotExist(err) {
return err
}
if !notMnt {
return nil
}
if err := os.MkdirAll(dir, 0750); err != nil {
return err
}
err = b.setUpAtInternal(dir)
if err == nil {
return nil
}
// Cleanup upon failure.
volutil.UnmountPath(dir, b.mounter)
return err
}
func (glusterfsVolume *glusterfs) GetPath() string {
name := glusterfsPluginName
return glusterfsVolume.plugin.host.GetPodVolumeDir(glusterfsVolume.pod.UID, strings.EscapeQualifiedNameForDisk(name), glusterfsVolume.volName)
}
type glusterfsUnmounter struct {
*glusterfs
}
var _ volume.Unmounter = &glusterfsUnmounter{}
func (c *glusterfsUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath())
}
func (c *glusterfsUnmounter) TearDownAt(dir string) error {
return volutil.UnmountPath(dir, c.mounter)
}
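// setUpAtInternal assembles the glusterfs mount options (read-only flag,
// log file and level, backup-volfile-servers, auto_unmount), performs the
// fuse mount against the first endpoint IP and, if the client rejects
// auto_unmount, retries without it before reporting errors from the log file.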
func (b *glusterfsMounter) setUpAtInternal(dir string) error {
var errs error
options := []string{}
hasLogFile := false
hasLogLevel := false
log := ""
if b.readOnly {
options = append(options, "ro")
}
// Check for log-file,log-level options existence in user supplied mount options, if provided, use those.
for _, userOpt := range b.mountOptions {
switch {
case dstrings.HasPrefix(userOpt, "log-file"):
glog.V(4).Infof("log-file mount option has provided")
hasLogFile = true
case dstrings.HasPrefix(userOpt, "log-level"):
glog.V(4).Infof("log-level mount option has provided")
hasLogLevel = true
}
}
// If logfile has not been provided, create driver specific log file.
if !hasLogFile {
log = ""
p := path.Join(b.glusterfs.plugin.host.GetPluginDir(glusterfsPluginName), b.glusterfs.volName)
if err := os.MkdirAll(p, 0750); err != nil {
return fmt.Errorf("failed to create directory %v: %v", p, err)
}
// adding log-level ERROR to remove noise
// and more specific log path so each pod has
// its own log based on PV + Pod
log = path.Join(p, b.pod.Name+"-glusterfs.log")
// Use derived log file in gluster fuse mount
options = append(options, "log-file="+log)
}
if !hasLogLevel {
options = append(options, "log-level=ERROR")
}
var addrlist []string
if b.hosts == nil {
return fmt.Errorf("glusterfs endpoint is nil in mounter")
}
addr := sets.String{}
if b.hosts.Subsets != nil {
for _, s := range b.hosts.Subsets {
for _, a := range s.Addresses {
if !addr.Has(a.IP) {
addr.Insert(a.IP)
addrlist = append(addrlist, a.IP)
}
}
}
}
//Add backup-volfile-servers and auto_unmount options.
options = append(options, "backup-volfile-servers="+dstrings.Join(addrlist[:], ":"))
options = append(options, "auto_unmount")
mountOptions := volutil.JoinMountOptions(b.mountOptions, options)
// with `backup-volfile-servers` mount option in place, it is not required to
// iterate over all the servers in the addrlist. A mount attempt with this option
// will fetch all the servers mentioned in the backup-volfile-servers list.
// Refer to backup-volfile-servers @ http://docs.gluster.org/en/latest/Administrator%20Guide/Setting%20Up%20Clients/
if (len(addrlist) > 0) && (addrlist[0] != "") {
ip := addrlist[0]
errs = b.mounter.Mount(ip+":"+b.path, dir, "glusterfs", mountOptions)
if errs == nil {
glog.Infof("successfully mounted directory %s", dir)
return nil
}
if dstrings.Contains(errs.Error(), "Invalid option auto_unmount") ||
dstrings.Contains(errs.Error(), "Invalid argument") {
// Give a try without `auto_unmount` mount option, because
// it could be that gluster fuse client is older version and
// mount.glusterfs is unaware of `auto_unmount`.
noAutoMountOptions := make([]string, 0, len(mountOptions))
for _, opt := range mountOptions {
if opt != "auto_unmount" {
noAutoMountOptions = append(noAutoMountOptions, opt)
}
}
errs = b.mounter.Mount(ip+":"+b.path, dir, "glusterfs", noAutoMountOptions)
if errs == nil {
glog.Infof("successfully mounted %s", dir)
return nil
}
}
} else {
return fmt.Errorf("failed to execute mount command:[no valid ipaddress found in endpoint address list]")
}
// Failed mount scenario.
// Since glusterfs does not return error text
// it all goes in a log file, we will read the log file
logErr := readGlusterLog(log, b.pod.Name)
if logErr != nil {
return fmt.Errorf("mount failed: %v the following error information was pulled from the glusterfs log to help diagnose this issue: %v", errs, logErr)
}
return fmt.Errorf("mount failed: %v", errs)
}
//getVolumeInfo returns 'path' and 'readonly' field values from the provided glusterfs spec.
func getVolumeInfo(spec *volume.Spec) (string, bool, error) {
if spec.Volume != nil && spec.Volume.Glusterfs != nil {
return spec.Volume.Glusterfs.Path, spec.Volume.Glusterfs.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.Glusterfs != nil {
return spec.PersistentVolume.Spec.Glusterfs.Path, spec.ReadOnly, nil
}
return "", false, fmt.Errorf("Spec does not reference a Glusterfs volume type")
}
func (plugin *glusterfsPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
return plugin.newProvisionerInternal(options)
}
func (plugin *glusterfsPlugin) newProvisionerInternal(options volume.VolumeOptions) (volume.Provisioner, error) {
return &glusterfsVolumeProvisioner{
glusterfsMounter: &glusterfsMounter{
glusterfs: &glusterfs{
plugin: plugin,
},
},
options: options,
}, nil
}
type provisionerConfig struct {
url string
user string
userKey string
secretNamespace string
secretName string
secretValue string
clusterID string
gidMin int
gidMax int
volumeType gapi.VolumeDurabilityInfo
volumeOptions []string
volumeNamePrefix string
thinPoolSnapFactor float32
customEpNamePrefix string
}
type glusterfsVolumeProvisioner struct {
*glusterfsMounter
provisionerConfig
options volume.VolumeOptions
}
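// convertGid parses a GID string into a non-negative int.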
func convertGid(gidString string) (int, error) {
gid64, err := strconv.ParseInt(gidString, 10, 32)
if err != nil {
return 0, fmt.Errorf("failed to parse gid %v: %v", gidString, err)
}
if gid64 < 0 {
return 0, fmt.Errorf("negative GIDs %v are not allowed", gidString)
}
// ParseInt returns a int64, but since we parsed only
// for 32 bit, we can cast to int without loss:
gid := int(gid64)
return gid, nil
}
func convertVolumeParam(volumeString string) (int, error) {
count, err := strconv.Atoi(volumeString)
if err != nil {
return 0, fmt.Errorf("failed to parse volumestring %q: %v", volumeString, err)
}
if count < 0 {
return 0, fmt.Errorf("negative values are not allowed")
}
return count, nil
}
func (plugin *glusterfsPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
return plugin.newDeleterInternal(spec)
}
func (plugin *glusterfsPlugin) newDeleterInternal(spec *volume.Spec) (volume.Deleter, error) {
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Glusterfs == nil {
return nil, fmt.Errorf("spec.PersistentVolume.Spec.Glusterfs is nil")
}
return &glusterfsVolumeDeleter{
glusterfsMounter: &glusterfsMounter{
glusterfs: &glusterfs{
volName: spec.Name(),
plugin: plugin,
},
path: spec.PersistentVolume.Spec.Glusterfs.Path,
},
spec: spec.PersistentVolume,
}, nil
}
type glusterfsVolumeDeleter struct {
*glusterfsMounter
provisionerConfig
spec *v1.PersistentVolume
}
func (d *glusterfsVolumeDeleter) GetPath() string {
name := glusterfsPluginName
return d.plugin.host.GetPodVolumeDir(d.glusterfsMounter.glusterfs.pod.UID, strings.EscapeQualifiedNameForDisk(name), d.glusterfsMounter.glusterfs.volName)
}
// Traverse the PVs, fetching all the GIDs from those
// in a given storage class, and mark them in the table.
func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAllocator) error {
kubeClient := plugin.host.GetKubeClient()
if kubeClient == nil {
return fmt.Errorf("failed to get kube client when collecting gids")
}
pvList, err := kubeClient.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil {
glog.Error("failed to get existing persistent volumes")
return err
}
for _, pv := range pvList.Items {
if v1helper.GetPersistentVolumeClass(&pv) != className {
continue
}
pvName := pv.ObjectMeta.Name
gidStr, ok := pv.Annotations[volutil.VolumeGidAnnotationKey]
if !ok {
glog.Warningf("no GID found in pv %v", pvName)
continue
}
gid, err := convertGid(gidStr)
if err != nil {
glog.Errorf("failed to parse gid %s: %v", gidStr, err)
continue
}
_, err = gidTable.Allocate(gid)
if err == ErrConflict {
glog.Warningf("GID %v found in pv %v was already allocated", gid, pvName)
} else if err != nil {
glog.Errorf("failed to store gid %v found in pv %v: %v", gid, pvName, err)
return err
}
}
return nil
}
// Return the gid table for a storage class.
// - If this is the first time, fill it with all the gids
// used in PVs of this storage class by traversing the PVs.
// - Adapt the range of the table to the current range of the SC.
func (plugin *glusterfsPlugin) getGidTable(className string, min int, max int) (*MinMaxAllocator, error) {
plugin.gidTableLock.Lock()
gidTable, ok := plugin.gidTable[className]
plugin.gidTableLock.Unlock()
if ok {
err := gidTable.SetRange(min, max)
if err != nil {
return nil, err
}
return gidTable, nil
}
// create a new table and fill it
newGidTable, err := NewMinMaxAllocator(0, absoluteGidMax)
if err != nil {
return nil, err
}
// collect gids with the full range
err = plugin.collectGids(className, newGidTable)
if err != nil {
return nil, err
}
// and only reduce the range afterwards
err = newGidTable.SetRange(min, max)
if err != nil {
return nil, err
}
// if in the meantime a table appeared, use it
plugin.gidTableLock.Lock()
defer plugin.gidTableLock.Unlock()
gidTable, ok = plugin.gidTable[className]
if ok {
err = gidTable.SetRange(min, max)
if err != nil {
return nil, err
}
return gidTable, nil
}
plugin.gidTable[className] = newGidTable
return newGidTable, nil
}
func (d *glusterfsVolumeDeleter) getGid() (int, bool, error) {
gidStr, ok := d.spec.Annotations[volutil.VolumeGidAnnotationKey]
if !ok {
return 0, false, nil
}
gid, err := convertGid(gidStr)
return gid, true, err
}
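// Delete removes the heketi-provisioned volume backing this PV, releases its
// GID back to the storage class allocator and deletes the dynamically created
// endpoint/service referenced by the PV's claim.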
func (d *glusterfsVolumeDeleter) Delete() error {
glog.V(2).Infof("delete volume %s", d.glusterfsMounter.path)
volumeName := d.glusterfsMounter.path
volumeID, err := getVolumeID(d.spec, volumeName)
if err != nil {
return fmt.Errorf("failed to get volumeID: %v", err)
}
class, err := volutil.GetClassForVolume(d.plugin.host.GetKubeClient(), d.spec)
if err != nil {
return err
}
cfg, err := parseClassParameters(class.Parameters, d.plugin.host.GetKubeClient())
if err != nil {
return err
}
d.provisionerConfig = *cfg
glog.V(4).Infof("deleting volume %q", volumeID)
gid, exists, err := d.getGid()
if err != nil {
glog.Error(err)
} else if exists {
gidTable, err := d.plugin.getGidTable(class.Name, cfg.gidMin, cfg.gidMax)
if err != nil {
return fmt.Errorf("failed to get gidTable: %v", err)
}
err = gidTable.Release(gid)
if err != nil {
return fmt.Errorf("failed to release gid %v: %v", gid, err)
}
}
cli := gcli.NewClient(d.url, d.user, d.secretValue)
if cli == nil {
glog.Errorf("failed to create glusterfs REST client")
return fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed")
}
err = cli.VolumeDelete(volumeID)
if err != nil {
glog.Errorf("failed to delete volume %s: %v", volumeName, err)
return err
}
glog.V(2).Infof("volume %s deleted successfully", volumeName)
//Deleter takes endpoint and namespace from pv spec.
pvSpec := d.spec.Spec
var dynamicEndpoint, dynamicNamespace string
if pvSpec.ClaimRef == nil {
glog.Errorf("ClaimRef is nil")
return fmt.Errorf("ClaimRef is nil")
}
if pvSpec.ClaimRef.Namespace == "" {
glog.Errorf("namespace is nil")
return fmt.Errorf("namespace is nil")
}
dynamicNamespace = pvSpec.ClaimRef.Namespace
if pvSpec.Glusterfs.EndpointsName != "" {
dynamicEndpoint = pvSpec.Glusterfs.EndpointsName
}
glog.V(3).Infof("dynamic namespace and endpoint %v/%v", dynamicNamespace, dynamicEndpoint)
err = d.deleteEndpointService(dynamicNamespace, dynamicEndpoint)
if err != nil {
glog.Errorf("failed to delete endpoint/service %v/%v: %v", dynamicNamespace, dynamicEndpoint, err)
} else {
glog.V(1).Infof("endpoint %v/%v is deleted successfully ", dynamicNamespace, dynamicEndpoint)
}
return nil
}
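// Provision validates the claim, allocates a GID from the storage class GID
// table, creates the backing volume through heketi and returns a
// PersistentVolume annotated with the GID and the heketi volume ID.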
func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
if !volutil.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
}
if p.options.PVC.Spec.Selector != nil {
glog.V(4).Infof("not able to parse your claim Selector")
return nil, fmt.Errorf("not able to parse your claim Selector")
}
if volutil.CheckPersistentVolumeClaimModeBlock(p.options.PVC) {
return nil, fmt.Errorf("%s does not support block volume provisioning", p.plugin.GetPluginName())
}
glog.V(4).Infof("Provision VolumeOptions %v", p.options)
scName := v1helper.GetPersistentVolumeClaimClass(p.options.PVC)
cfg, err := parseClassParameters(p.options.Parameters, p.plugin.host.GetKubeClient())
if err != nil {
return nil, err
}
p.provisionerConfig = *cfg
gidTable, err := p.plugin.getGidTable(scName, cfg.gidMin, cfg.gidMax)
if err != nil {
return nil, fmt.Errorf("failed to get gidTable: %v", err)
}
gid, _, err := gidTable.AllocateNext()
if err != nil {
glog.Errorf("failed to reserve GID from table: %v", err)
return nil, fmt.Errorf("failed to reserve GID from table: %v", err)
}
glog.V(2).Infof("Allocated GID %d for PVC %s", gid, p.options.PVC.Name)
glusterfs, sizeGiB, volID, err := p.CreateVolume(gid)
if err != nil {
if releaseErr := gidTable.Release(gid); releaseErr != nil {
glog.Errorf("error when releasing GID in storageclass %s: %v", scName, releaseErr)
}
glog.Errorf("failed to create volume: %v", err)
return nil, fmt.Errorf("failed to create volume: %v", err)
}
mode := v1.PersistentVolumeFilesystem
pv := new(v1.PersistentVolume)
pv.Spec.PersistentVolumeSource.Glusterfs = glusterfs
pv.Spec.PersistentVolumeReclaimPolicy = p.options.PersistentVolumeReclaimPolicy
pv.Spec.AccessModes = p.options.PVC.Spec.AccessModes
pv.Spec.VolumeMode = &mode
if len(pv.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = p.plugin.GetAccessModes()
}
pv.Spec.MountOptions = p.options.MountOptions
gidStr := strconv.FormatInt(int64(gid), 10)
pv.Annotations = map[string]string{
volutil.VolumeGidAnnotationKey: gidStr,
volutil.VolumeDynamicallyCreatedByKey: heketiAnn,
glusterTypeAnn: "file",
"Description": glusterDescAnn,
heketiVolIDAnn: volID,
}
pv.Spec.Capacity = v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGiB)),
}
return pv, nil
}
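// CreateVolume rounds the requested capacity up to GiB, asks heketi to create
// a volume of that size and exposes the cluster node IPs through a dynamic
// endpoint/service, returning the volume source, its size and its volume ID.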
func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersistentVolumeSource, size int, volID string, err error) {
var clusterIDs []string
customVolumeName := ""
epServiceName := ""
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
// GlusterFS/heketi creates volumes in units of GiB.
sz, err := volutil.RoundUpToGiBInt(capacity)
if err != nil {
return nil, 0, "", err
}
glog.V(2).Infof("create volume of size %dGiB", sz)
if p.url == "" {
glog.Errorf("REST server endpoint is empty")
return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST URL is empty")
}
cli := gcli.NewClient(p.url, p.user, p.secretValue)
if cli == nil {
glog.Errorf("failed to create glusterfs REST client")
return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed")
}
if p.provisionerConfig.clusterID != "" {
clusterIDs = dstrings.Split(p.clusterID, ",")
glog.V(4).Infof("provided clusterIDs %v", clusterIDs)
}
if p.provisionerConfig.volumeNamePrefix != "" {
customVolumeName = fmt.Sprintf("%s_%s_%s_%s", p.provisionerConfig.volumeNamePrefix, p.options.PVC.Namespace, p.options.PVC.Name, uuid.NewUUID())
}
gid64 := int64(gid)
snaps := struct {
Enable bool `json:"enable"`
Factor float32 `json:"factor"`
}{
true,
p.provisionerConfig.thinPoolSnapFactor,
}
volumeReq := &gapi.VolumeCreateRequest{Size: sz, Name: customVolumeName, Clusters: clusterIDs, Gid: gid64, Durability: p.volumeType, GlusterVolumeOptions: p.volumeOptions, Snapshot: snaps}
volume, err := cli.VolumeCreate(volumeReq)
if err != nil {
glog.Errorf("failed to create volume: %v", err)
return nil, 0, "", fmt.Errorf("failed to create volume: %v", err)
}
glog.V(1).Infof("volume with size %d and name %s created", volume.Size, volume.Name)
volID = volume.Id
dynamicHostIps, err := getClusterNodes(cli, volume.Cluster)
if err != nil {
glog.Errorf("failed to get cluster nodes for volume %s: %v", volume, err)
return nil, 0, "", fmt.Errorf("failed to get cluster nodes for volume %s: %v", volume, err)
}
if len(p.provisionerConfig.customEpNamePrefix) == 0 {
epServiceName = string(p.options.PVC.UID)
} else {
epServiceName = p.provisionerConfig.customEpNamePrefix + "-" + string(p.options.PVC.UID)
}
epNamespace := p.options.PVC.Namespace
endpoint, service, err := p.createEndpointService(epNamespace, epServiceName, dynamicHostIps, p.options.PVC.Name)
if err != nil {
glog.Errorf("failed to create endpoint/service %v/%v: %v", epNamespace, epServiceName, err)
deleteErr := cli.VolumeDelete(volume.Id)
if deleteErr != nil {
glog.Errorf("failed to delete volume: %v, manual deletion of the volume required", deleteErr)
}
return nil, 0, "", fmt.Errorf("failed to create endpoint/service %v/%v: %v", epNamespace, epServiceName, err)
}
glog.V(3).Infof("dynamic endpoint %v and service %v ", endpoint, service)
return &v1.GlusterfsPersistentVolumeSource{
EndpointsName: endpoint.Name,
EndpointsNamespace: &epNamespace,
Path: volume.Name,
ReadOnly: false,
}, sz, volID, nil
}
// createEndpointService() makes sure an endpoint and service
// exist for the given namespace, PVC name, endpoint name, and
// set of IPs. I.e. the endpoint or service is only created
// if it does not exist yet.
func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epServiceName string, hostips []string, pvcname string) (endpoint *v1.Endpoints, service *v1.Service, err error) {
addrlist := make([]v1.EndpointAddress, len(hostips))
for i, v := range hostips {
addrlist[i].IP = v
}
endpoint = &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: epServiceName,
Labels: map[string]string{
"gluster.kubernetes.io/provisioned-for-pvc": pvcname,
},
},
Subsets: []v1.EndpointSubset{{
Addresses: addrlist,
Ports: []v1.EndpointPort{{Port: 1, Protocol: "TCP"}},
}},
}
kubeClient := p.plugin.host.GetKubeClient()
if kubeClient == nil {
return nil, nil, fmt.Errorf("failed to get kube client when creating endpoint service")
}
_, err = kubeClient.CoreV1().Endpoints(namespace).Create(endpoint)
if err != nil && errors.IsAlreadyExists(err) {
glog.V(1).Infof("endpoint %s already exist in namespace %s", endpoint, namespace)
err = nil
}
if err != nil {
glog.Errorf("failed to create endpoint: %v", err)
return nil, nil, fmt.Errorf("failed to create endpoint: %v", err)
}
service = &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: epServiceName,
Namespace: namespace,
Labels: map[string]string{
"gluster.kubernetes.io/provisioned-for-pvc": pvcname,
},
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{
{Protocol: "TCP", Port: 1}}}}
_, err = kubeClient.CoreV1().Services(namespace).Create(service)
if err != nil && errors.IsAlreadyExists(err) {
glog.V(1).Infof("service %s already exist in namespace %s", service, namespace)
err = nil
}
if err != nil {
glog.Errorf("failed to create service: %v", err)
return nil, nil, fmt.Errorf("error creating service: %v", err)
}
return endpoint, service, nil
}
func (d *glusterfsVolumeDeleter) deleteEndpointService(namespace string, epServiceName string) (err error) {
kubeClient := d.plugin.host.GetKubeClient()
if kubeClient == nil {
return fmt.Errorf("failed to get kube client when deleting endpoint service")
}
err = kubeClient.CoreV1().Services(namespace).Delete(epServiceName, nil)
if err != nil {
glog.Errorf("failed to delete service %s/%s: %v", namespace, epServiceName, err)
return fmt.Errorf("failed to delete service %s/%s: %v", namespace, epServiceName, err)
}
glog.V(1).Infof("service/endpoint: %s/%s deleted successfully", namespace, epServiceName)
return nil
}
// parseSecret finds a given Secret instance and reads user password from it.
func parseSecret(namespace, secretName string, kubeClient clientset.Interface) (string, error) {
secretMap, err := volutil.GetSecretForPV(namespace, secretName, glusterfsPluginName, kubeClient)
if err != nil {
glog.Errorf("failed to get secret: %s/%s: %v", namespace, secretName, err)
return "", fmt.Errorf("failed to get secret %s/%s: %v", namespace, secretName, err)
}
if len(secretMap) == 0 {
return "", fmt.Errorf("empty secret map")
}
secret := ""
for k, v := range secretMap {
if k == secretKeyName {
return v, nil
}
secret = v
}
// If not found, the last secret in the map wins as done before
return secret, nil
}
// getClusterNodes() returns the cluster nodes of a given cluster
func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string, err error) {
clusterinfo, err := cli.ClusterInfo(cluster)
if err != nil {
glog.Errorf("failed to get cluster details: %v", err)
return nil, fmt.Errorf("failed to get cluster details: %v", err)
}
	// For the dynamically provisioned volume, we gather the list of node IPs
	// of the cluster to which the provisioned volume belongs, as there can be
	// multiple clusters.
for _, node := range clusterinfo.Nodes {
nodeInfo, err := cli.NodeInfo(string(node))
if err != nil {
glog.Errorf("failed to get host ipaddress: %v", err)
return nil, fmt.Errorf("failed to get host ipaddress: %v", err)
}
ipaddr := dstrings.Join(nodeInfo.NodeAddRequest.Hostnames.Storage, "")
dynamicHostIps = append(dynamicHostIps, ipaddr)
}
glog.V(3).Infof("host list :%v", dynamicHostIps)
if len(dynamicHostIps) == 0 {
glog.Errorf("no hosts found: %v", err)
return nil, fmt.Errorf("no hosts found: %v", err)
}
return dynamicHostIps, nil
}
// parseClassParameters parses StorageClass parameters.
func parseClassParameters(params map[string]string, kubeClient clientset.Interface) (*provisionerConfig, error) {
var cfg provisionerConfig
var err error
cfg.gidMin = defaultGidMin
cfg.gidMax = defaultGidMax
cfg.customEpNamePrefix = dynamicEpSvcPrefix
authEnabled := true
parseVolumeType := ""
parseVolumeOptions := ""
parseVolumeNamePrefix := ""
parseThinPoolSnapFactor := ""
//thin pool snap factor default to 1.0
cfg.thinPoolSnapFactor = float32(1.0)
for k, v := range params {
switch dstrings.ToLower(k) {
case "resturl":
cfg.url = v
case "restuser":
cfg.user = v
case "restuserkey":
cfg.userKey = v
case "secretname":
cfg.secretName = v
case "secretnamespace":
cfg.secretNamespace = v
case "clusterid":
if len(v) != 0 {
cfg.clusterID = v
}
case "restauthenabled":
authEnabled = dstrings.ToLower(v) == "true"
case "gidmin":
parseGidMin, err := convertGid(v)
if err != nil {
return nil, fmt.Errorf("invalid gidMin value %q for volume plugin %s", k, glusterfsPluginName)
}
if parseGidMin < absoluteGidMin {
return nil, fmt.Errorf("gidMin must be >= %v", absoluteGidMin)
}
if parseGidMin > absoluteGidMax {
return nil, fmt.Errorf("gidMin must be <= %v", absoluteGidMax)
}
cfg.gidMin = parseGidMin
case "gidmax":
parseGidMax, err := convertGid(v)
if err != nil {
return nil, fmt.Errorf("invalid gidMax value %q for volume plugin %s", k, glusterfsPluginName)
}
if parseGidMax < absoluteGidMin {
return nil, fmt.Errorf("gidMax must be >= %v", absoluteGidMin)
}
if parseGidMax > absoluteGidMax {
return nil, fmt.Errorf("gidMax must be <= %v", absoluteGidMax)
}
cfg.gidMax = parseGidMax
case "volumetype":
parseVolumeType = v
case "volumeoptions":
if len(v) != 0 {
parseVolumeOptions = v
}
case "volumenameprefix":
if len(v) != 0 {
parseVolumeNamePrefix = v
}
case "snapfactor":
if len(v) != 0 {
parseThinPoolSnapFactor = v
}
case "customepnameprefix":
// If the string has > 'maxCustomEpNamePrefixLen' chars, the final endpoint name will
// exceed the limitation of 63 chars, so fail if prefix is > 'maxCustomEpNamePrefixLen'
// characters. This is only applicable for 'customepnameprefix' string and default ep name
// string will always pass.
if len(v) <= maxCustomEpNamePrefixLen {
cfg.customEpNamePrefix = v
} else {
return nil, fmt.Errorf("'customepnameprefix' value should be < %d characters", maxCustomEpNamePrefixLen)
}
default:
return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, glusterfsPluginName)
}
}
if len(cfg.url) == 0 {
return nil, fmt.Errorf("StorageClass for provisioner %s must contain 'resturl' parameter", glusterfsPluginName)
}
if len(parseVolumeType) == 0 {
cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityReplicate, Replicate: gapi.ReplicaDurability{Replica: replicaCount}}
} else {
parseVolumeTypeInfo := dstrings.Split(parseVolumeType, ":")
switch parseVolumeTypeInfo[0] {
case "replicate":
if len(parseVolumeTypeInfo) >= 2 {<|fim▁hole|> cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityReplicate, Replicate: gapi.ReplicaDurability{Replica: newReplicaCount}}
} else {
cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityReplicate, Replicate: gapi.ReplicaDurability{Replica: replicaCount}}
}
case "disperse":
if len(parseVolumeTypeInfo) >= 3 {
newDisperseData, err := convertVolumeParam(parseVolumeTypeInfo[1])
if err != nil {
return nil, fmt.Errorf("error parsing volumeType %q: %s", parseVolumeTypeInfo[1], err)
}
newDisperseRedundancy, err := convertVolumeParam(parseVolumeTypeInfo[2])
if err != nil {
return nil, fmt.Errorf("error parsing volumeType %q: %s", parseVolumeTypeInfo[2], err)
}
cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityEC, Disperse: gapi.DisperseDurability{Data: newDisperseData, Redundancy: newDisperseRedundancy}}
} else {
return nil, fmt.Errorf("StorageClass for provisioner %q must have data:redundancy count set for disperse volumes in storage class option '%s'", glusterfsPluginName, "volumetype")
}
case "none":
cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityDistributeOnly}
default:
return nil, fmt.Errorf("error parsing value for option 'volumetype' for volume plugin %s", glusterfsPluginName)
}
}
if !authEnabled {
cfg.user = ""
cfg.secretName = ""
cfg.secretNamespace = ""
cfg.userKey = ""
cfg.secretValue = ""
}
if len(cfg.secretName) != 0 || len(cfg.secretNamespace) != 0 {
// secretName + Namespace has precedence over userKey
if len(cfg.secretName) != 0 && len(cfg.secretNamespace) != 0 {
cfg.secretValue, err = parseSecret(cfg.secretNamespace, cfg.secretName, kubeClient)
if err != nil {
return nil, err
}
} else {
return nil, fmt.Errorf("StorageClass for provisioner %q must have secretNamespace and secretName either both set or both empty", glusterfsPluginName)
}
} else {
cfg.secretValue = cfg.userKey
}
if cfg.gidMin > cfg.gidMax {
return nil, fmt.Errorf("StorageClass for provisioner %q must have gidMax value >= gidMin", glusterfsPluginName)
}
if len(parseVolumeOptions) != 0 {
volOptions := dstrings.Split(parseVolumeOptions, ",")
if len(volOptions) == 0 {
return nil, fmt.Errorf("StorageClass for provisioner %q must have valid (for e.g., 'client.ssl on') volume option", glusterfsPluginName)
}
cfg.volumeOptions = volOptions
}
if len(parseVolumeNamePrefix) != 0 {
if dstrings.Contains(parseVolumeNamePrefix, "_") {
return nil, fmt.Errorf("Storageclass parameter 'volumenameprefix' should not contain '_' in its value")
}
cfg.volumeNamePrefix = parseVolumeNamePrefix
}
if len(parseThinPoolSnapFactor) != 0 {
thinPoolSnapFactor, err := strconv.ParseFloat(parseThinPoolSnapFactor, 32)
if err != nil {
return nil, fmt.Errorf("failed to convert snapfactor %v to float: %v", parseThinPoolSnapFactor, err)
}
if thinPoolSnapFactor < 1.0 || thinPoolSnapFactor > 100.0 {
return nil, fmt.Errorf("invalid snapshot factor %v, the value must be between 1 to 100", thinPoolSnapFactor)
}
cfg.thinPoolSnapFactor = float32(thinPoolSnapFactor)
}
return &cfg, nil
}
// getVolumeID returns volumeID from the PV or volumename.
func getVolumeID(pv *v1.PersistentVolume, volumeName string) (string, error) {
volumeID := ""
// Get volID from pvspec if available, else fill it from volumename.
if pv != nil {
if pv.Annotations[heketiVolIDAnn] != "" {
volumeID = pv.Annotations[heketiVolIDAnn]
} else {
volumeID = dstrings.TrimPrefix(volumeName, volPrefix)
}
} else {
return volumeID, fmt.Errorf("provided PV spec is nil")
}
if volumeID == "" {
return volumeID, fmt.Errorf("volume ID is empty")
}
return volumeID, nil
}
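// ExpandVolumeDevice grows the heketi volume by the difference between
// newSize and oldSize (rounded up to GiB) and returns the size reported back
// by heketi.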
func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
pvSpec := spec.PersistentVolume.Spec
volumeName := pvSpec.Glusterfs.Path
glog.V(2).Infof("Received request to expand volume %s", volumeName)
volumeID, err := getVolumeID(spec.PersistentVolume, volumeName)
if err != nil {
return oldSize, fmt.Errorf("failed to get volumeID for volume %s: %v", volumeName, err)
}
//Get details of StorageClass.
class, err := volutil.GetClassForVolume(plugin.host.GetKubeClient(), spec.PersistentVolume)
if err != nil {
return oldSize, err
}
cfg, err := parseClassParameters(class.Parameters, plugin.host.GetKubeClient())
if err != nil {
return oldSize, err
}
glog.V(4).Infof("expanding volume: %q", volumeID)
//Create REST server connection
cli := gcli.NewClient(cfg.url, cfg.user, cfg.secretValue)
if cli == nil {
glog.Errorf("failed to create glusterfs REST client")
return oldSize, fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed")
}
// Find out delta size
expansionSize := (newSize.Value() - oldSize.Value())
expansionSizeGiB := int(volutil.RoundUpSize(expansionSize, volutil.GIB))
// Find out requested Size
requestGiB := volutil.RoundUpToGiB(newSize)
//Check the existing volume size
currentVolumeInfo, err := cli.VolumeInfo(volumeID)
if err != nil {
glog.Errorf("error when fetching details of volume %s: %v", volumeName, err)
return oldSize, err
}
if int64(currentVolumeInfo.Size) >= requestGiB {
return newSize, nil
}
// Make volume expansion request
volumeExpandReq := &gapi.VolumeExpandRequest{Size: expansionSizeGiB}
// Expand the volume
volumeInfoRes, err := cli.VolumeExpand(volumeID, volumeExpandReq)
if err != nil {
glog.Errorf("failed to expand volume %s: %v", volumeName, err)
return oldSize, err
}
glog.V(2).Infof("volume %s expanded to new size %d successfully", volumeName, volumeInfoRes.Size)
newVolumeSize := resource.MustParse(fmt.Sprintf("%dGi", volumeInfoRes.Size))
return newVolumeSize, nil
}<|fim▁end|> | newReplicaCount, err := convertVolumeParam(parseVolumeTypeInfo[1])
if err != nil {
return nil, fmt.Errorf("error parsing volumeType %q: %s", parseVolumeTypeInfo[1], err)
} |
<|file_name|>choose.rs<|end_file_name|><|fim▁begin|>//! depends: rand = "0.3"
//! depends: regex = "0.2"
extern crate zaldinar_core;
extern crate regex;
extern crate rand;
use rand::Rng;
use zaldinar_core::client::PluginRegister;
use zaldinar_core::events::CommandEvent;
macro_rules! regex {
($s:expr) => (::regex::Regex::new($s).unwrap())
}
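// Picks a random entry from the command arguments, splitting on commas when
// present and on whitespace otherwise.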
fn choose(event: &CommandEvent) {
let content = event.args.join(" ");
let mut rng = rand::thread_rng();
let split = if content.contains(",") {
regex!(r"\s*,\s*").split(&content).collect::<Vec<&str>>()
} else {
regex!(r"\s+").split(&content).collect::<Vec<&str>>()
};
let message = match rng.choose(&split) {
Some(v) => *v,
None => "I don't have anything to choose from.",
};
event.client.send_message(event.channel(), message);
}
fn coin(event: &CommandEvent) {
let mut rng = rand::thread_rng();
let message = format!("\x01ACTION flips a coin... \x02{}\x02\x01", rng.choose(&["heads",
"tails"]).unwrap());
event.client.send_message(event.channel(), message);
}
fn rand_command(event: &CommandEvent) {
if event.args.len() != 1 {
event.client.send_message(event.channel(), "Please specify exactly one argument.");
return;
}
let max = match event.args[0].parse::<u64>() {
Ok(v) => v,
Err(_) => {
event.client.send_message(event.channel(),
format!("Invalid number '{}'", event.args[0]));
return;
},
};
let mut rng = rand::thread_rng();
event.client.send_message(event.channel(), format!("{}", rng.gen_range(0, max) + 1));
}
pub fn register(register: &mut PluginRegister) {
register.register_command("choose", choose);<|fim▁hole|> register.register_command("coin", coin);
register.register_command("rand", rand_command);
}<|fim▁end|> | |
<|file_name|>hitting_sets.cpp<|end_file_name|><|fim▁begin|>/*++
Copyright (c) 2014 Microsoft Corporation
Module Name:
hitting_sets.h
Abstract:
Hitting set approximations.
Author:
Nikolaj Bjorner (nbjorner) 2014-06-06
Notes:
--*/
#include "vector.h"
#include "util.h"
#include "hitting_sets.h"
#include "simplex.h"
#include "sparse_matrix_def.h"
#include "simplex_def.h"
typedef simplex::simplex<simplex::mpz_ext> Simplex;
typedef simplex::sparse_matrix<simplex::mpz_ext> sparse_matrix;
namespace opt {
struct hitting_sets::imp {
class justification {
public:
enum kind_t { AXIOM, DECISION, CLAUSE };
private:
kind_t m_kind;
unsigned m_value;
bool m_pos;
public:
explicit justification(kind_t k):m_kind(k), m_value(0), m_pos(false) {}
explicit justification(unsigned v, bool pos):m_kind(CLAUSE), m_value(v), m_pos(pos) {}
justification(justification const& other):
m_kind(other.m_kind), m_value(other.m_value), m_pos(other.m_pos) {}
justification& operator=(justification const& other) {
m_kind = other.m_kind;
m_value = other.m_value;
m_pos = other.m_pos;
return *this;
}
unsigned clause() const { return m_value; }
bool is_axiom() const { return m_kind == AXIOM; }
bool is_decision() const { return m_kind == DECISION; }
bool is_clause() const { return m_kind == CLAUSE; }
kind_t kind() const { return m_kind; }
bool pos() const { return m_pos; }
};
class set {
unsigned m_num_elems;
unsigned m_elems[0];
set(): m_num_elems(0) {}
public:
static set* mk(small_object_allocator& alloc, unsigned sz, unsigned const* elems) {
unsigned size = (sz+1)*sizeof(unsigned);
void * mem = alloc.allocate(size);
set* result = new (mem) set();
result->m_num_elems = sz;
memcpy(result->m_elems, elems, sizeof(unsigned)*sz);
return result;
}
inline unsigned operator[](unsigned idx) const {
SASSERT(idx < m_num_elems);
return m_elems[idx];
}
inline unsigned& operator[](unsigned idx) {
SASSERT(idx < m_num_elems);
return m_elems[idx];
}
unsigned size() const { return m_num_elems; }
unsigned alloc_size() const { return (m_num_elems + 1)*sizeof(unsigned); }
bool empty() const { return 0 == size(); }
};
reslimit& m_limit;
rational m_lower;
rational m_upper;
vector<rational> m_weights;
vector<rational> m_weights_inv;
rational m_max_weight;
rational m_denominator;
small_object_allocator m_alloc;
ptr_vector<set> m_T;
ptr_vector<set> m_F;
svector<lbool> m_value;
svector<lbool> m_model;
vector<unsigned_vector> m_tuse_list;
vector<unsigned_vector> m_fuse_list;
// Custom CDCL solver.
svector<justification> m_justification;
vector<unsigned_vector> m_twatch;
vector<unsigned_vector> m_fwatch;
unsigned_vector m_level;
unsigned_vector m_trail; // trail of assigned literals
unsigned m_qhead; // queue head
justification m_conflict_j; // conflict justification
unsigned m_conflict_l; // conflict literal
bool m_inconsistent;
unsigned m_scope_lvl;
rational m_weight; // current weight of assignment.
unsigned_vector m_indices;
unsigned_vector m_scores;
vector<rational> m_scored_weights;
svector<bool> m_score_updated;
bool m_enable_simplex;
struct compare_scores {
imp* m_imp;
compare_scores():m_imp(0) {}
bool operator()(int v1, int v2) const {
return m_imp->m_scored_weights[v1] > m_imp->m_scored_weights[v2];
}
};
compare_scores m_compare_scores;
heap<compare_scores> m_heap;
svector<bool> m_mark;
struct scope {
unsigned m_trail_lim;
};
vector<scope> m_scopes;
unsigned_vector m_lemma;
unsigned m_conflict_lvl;
// simplex
unsynch_mpz_manager m;
Simplex m_simplex;
unsigned m_weights_var;
static unsigned const null_idx = UINT_MAX;
imp(reslimit& lim):
m_limit(lim),
m_max_weight(0),
m_denominator(1),
m_alloc("hitting-sets"),
m_qhead(0),
m_conflict_j(justification(justification::AXIOM)),
m_inconsistent(false),
m_scope_lvl(0),
m_compare_scores(),
m_heap(0, m_compare_scores),
m_simplex(lim),
m_weights_var(0) {
m_enable_simplex = true;
m_compare_scores.m_imp = this;
}
~imp() {
for (unsigned i = 0; i < m_T.size(); ++i) {
m_alloc.deallocate(m_T[i]->alloc_size(), m_T[i]);
}
for (unsigned i = 0; i < m_F.size(); ++i) {
m_alloc.deallocate(m_F[i]->alloc_size(), m_F[i]);
}
}
void add_weight(rational const& w) {
SASSERT(w.is_pos());
unsigned var = m_weights.size();
m_simplex.ensure_var(var);
m_simplex.set_lower(var, mpq_inf(mpq(0),mpq(0)));
m_simplex.set_upper(var, mpq_inf(mpq(1),mpq(0)));
m_weights.push_back(w);
m_weights_inv.push_back(rational::one());
m_value.push_back(l_undef);
m_justification.push_back(justification(justification::DECISION));
m_tuse_list.push_back(unsigned_vector());
m_fuse_list.push_back(unsigned_vector());
m_twatch.push_back(unsigned_vector());
m_fwatch.push_back(unsigned_vector());
m_level.push_back(0);
m_indices.push_back(var);
m_model.push_back(l_undef);
m_mark.push_back(false);
m_scores.push_back(0);
m_scored_weights.push_back(rational(0));
m_score_updated.push_back(true);
m_max_weight += w;
}
justification add_exists_false(unsigned sz, unsigned const* S) {
return add_exists(sz, S, true);
}
justification add_exists_true(unsigned sz, unsigned const* S) {
return add_exists(sz, S, false);
}
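        // Add the constraint that some element of S is true (sign == false)
        // or false (sign == true). Empty and unit sets are handled eagerly;
        // larger sets are stored as clauses with two watched elements, and
        // positive sets also feed the simplex lower bound when enabled.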
justification add_exists(unsigned sz, unsigned const* S, bool sign) {
vector<unsigned_vector>& use_list = sign?m_fuse_list:m_tuse_list;
lbool val = sign?l_false:l_true;
justification j(justification::AXIOM);
ptr_vector<set>& Sets = sign?m_F:m_T;
vector<unsigned_vector>& watch = sign?m_fwatch:m_twatch;
init_weights();
if (sz == 0) {
set_conflict(0, justification(justification::AXIOM));
}
else if (sz == 1) {
IF_VERBOSE(2, verbose_stream() << "unit literal : " << S[0] << " " << val << "\n";);
assign(S[0], val, justification(justification::AXIOM));
}
else {
unsigned clause_id = Sets.size();
for (unsigned i = 0; i < sz; ++i) {
use_list[S[i]].push_back(clause_id);
}
j = justification(clause_id, !sign);
watch[S[0]].push_back(clause_id);
watch[S[1]].push_back(clause_id);
Sets.push_back(set::mk(m_alloc, sz, S));
if (!sign) {
pop(scope_lvl());
inc_score(clause_id);
}
TRACE("opt", display(tout, j););
IF_VERBOSE(2, if (!sign) display(verbose_stream(), j););
if (!sign && m_enable_simplex) {
add_simplex_row(!sign, sz, S);
}
}
return j;
}
lbool compute_lower() {
m_lower.reset();
rational w1 = L1();
rational w2 = L2();
rational w3 = L3();
if (w1 > m_lower) m_lower = w1;
if (w2 > m_lower) m_lower = w2;
if (w3 > m_lower) m_lower = w3;
return l_true;
}
lbool compute_upper() {
m_upper = m_max_weight;
unsigned fsz = m_F.size();
lbool r = search();
pop(scope_lvl());
IF_VERBOSE(1, verbose_stream() << "(hsmax.negated-size: " << fsz << ")\n";);
#if 0
        // garbage collect aggressively on exit.
        // all learned clauses for negative branches are
        // pruned.
for (unsigned i = fsz; i < m_F.size(); ++i) {
m_alloc.deallocate(m_F[i]->alloc_size(), m_F[i]);
}
m_F.resize(fsz);
for (unsigned i = 0; i < m_fuse_list.size(); ++i) {
unsigned_vector & uses = m_fuse_list[i];
while (!uses.empty() && uses.back() >= fsz) uses.pop_back();
unsigned_vector & watch = m_fwatch[i];
unsigned j = 0, k = 0;
for (; j < watch.size(); ++j) {
if (watch[j] < fsz) {
watch[k] = watch[j];
++k;
}
}
watch.resize(k);
}
#endif
return r;
}
rational get_lower() {
return m_lower/m_denominator;
}
rational get_upper() {
return m_upper/m_denominator;
}
void set_upper(rational const& r) {
m_max_weight = r*m_denominator;
}
bool get_value(unsigned idx) {
return <|fim▁hole|> }
void collect_statistics(::statistics& st) const {
m_simplex.collect_statistics(st);
}
void reset() {
m_lower.reset();
m_upper = m_max_weight;
}
void init_weights() {
if (m_weights_var != 0) {
return;
}
m_weights_var = m_weights.size();
unsigned_vector vars;
scoped_mpz_vector coeffs(m);
// normalize weights to integral.
rational d(1);
for (unsigned i = 0; i < m_weights.size(); ++i) {
d = lcm(d, denominator(m_weights[i]));
}
m_denominator = d;
if (!d.is_one()) {
for (unsigned i = 0; i < m_weights.size(); ++i) {
m_weights[i] *= d;
}
}
rational lc(1);
for (unsigned i = 0; i < m_weights.size(); ++i) {
lc = lcm(lc, m_weights[i]);
}
for (unsigned i = 0; i < m_weights.size(); ++i) {
m_weights_inv[i] = lc/m_weights[i];
}
m_heap.set_bounds(m_weights.size());
for (unsigned i = 0; i < m_weights.size(); ++i) {
m_heap.insert(i);
}
update_heap();
// set up Simplex objective function.
for (unsigned i = 0; i < m_weights.size(); ++i) {
vars.push_back(i);
coeffs.push_back(m_weights[i].to_mpq().numerator());
}
m_simplex.ensure_var(m_weights_var);
vars.push_back(m_weights_var);
coeffs.push_back(mpz(-1));
m_simplex.add_row(m_weights_var, coeffs.size(), vars.c_ptr(), coeffs.c_ptr());
}
void display(std::ostream& out) const {
out << "inconsistent: " << m_inconsistent << "\n";
out << "weight: " << m_weight << "\n";
for (unsigned i = 0; i < m_weights.size(); ++i) {
out << i << ": " << value(i) << " w: " << m_weights[i] << " s: " << m_scores[i] << "\n";
}
for (unsigned i = 0; i < m_T.size(); ++i) {
display(out << "+" << i << ": ", *m_T[i]);
}
for (unsigned i = 0; i < m_F.size(); ++i) {
display(out << "-" << i << ": ", *m_F[i]);
}
out << "watch lists:\n";
for (unsigned i = 0; i < m_fwatch.size(); ++i) {
out << i << ": ";
for (unsigned j = 0; j < m_twatch[i].size(); ++j) {
out << "+" << m_twatch[i][j] << " ";
}
for (unsigned j = 0; j < m_fwatch[i].size(); ++j) {
out << "-" << m_fwatch[i][j] << " ";
}
out << "\n";
}
out << "trail\n";
for (unsigned i = 0; i < m_trail.size(); ++i) {
unsigned idx = m_trail[i];
out << (m_justification[idx].is_decision()?"d":"") << idx << " ";
}
out << "\n";
}
void display(std::ostream& out, set const& S) const {
for (unsigned i = 0; i < S.size(); ++i) {
out << S[i] << " ";
}
out << "\n";
}
void display(std::ostream& out, justification const& j) const {
switch(j.kind()) {
case justification::AXIOM:
out << "axiom\n";
break;
case justification::DECISION:
out << "decision\n";
break;
case justification::CLAUSE: {
out << "clause: ";
set const& S = j.pos()?(*m_T[j.clause()]):(*m_F[j.clause()]);
for (unsigned i = 0; i < S.size(); ++i) {
out << S[i] << " ";
}
out << "\n";
}
}
}
void display_lemma(std::ostream& out) {
out << "lemma: ";
for (unsigned i = 0; i < m_lemma.size(); ++i) {
out << m_lemma[i] << " ";
}
out << "\n";
}
struct scoped_push {
imp& s;
scoped_push(imp& s):s(s) { s.push(); }
~scoped_push() { s.pop(1); }
};
struct value_lt {
vector<rational> const& weights;
value_lt(vector<rational> const& weights):
weights(weights) {}
bool operator()(int v1, int v2) const {
return weights[v1] > weights[v2];
}
};
void inc_score(unsigned clause_id) {
set const& S = *m_T[clause_id];
if (!has_selected(S)) {
for (unsigned j = 0; j < S.size(); ++j) {
++m_scores[S[j]];
m_score_updated[S[j]] = true;
}
}
}
void dec_score(unsigned clause_id) {
set const& S = *m_T[clause_id];
if (!has_selected(S)) {
for (unsigned j = 0; j < S.size(); ++j) {
SASSERT(m_scores[S[j]] > 0);
--m_scores[S[j]];
m_score_updated[S[j]] = true;
}
}
}
void update_score(unsigned idx, bool inc) {
unsigned_vector const& uses = m_tuse_list[idx];
for (unsigned i = 0; i < uses.size(); ++i) {
if (inc) {
inc_score(uses[i]);
}
else {
dec_score(uses[i]);
}
}
}
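        // L1: greedy lower bound. For every set that is not yet hit, charge
        // the weight of its cheapest element and mark the whole set as
        // covered so overlapping sets are not charged twice.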
rational L1() {
rational w(m_weight);
scoped_push _sc(*this);
for (unsigned i = 0; !canceled() && i < m_T.size(); ++i) {
set const& S = *m_T[i];
SASSERT(!S.empty());
if (!has_selected(S)) {
w += m_weights[select_min(S)];
for (unsigned j = 0; j < S.size(); ++j) {
assign(S[j], l_true, justification(justification::DECISION));
}
}
}
return w;
}
void update_heap() {
for (unsigned i = 0; i < m_scored_weights.size(); ++i) {
if (m_score_updated[i]) {
rational const& old_w = m_scored_weights[i];
rational new_w = rational(m_scores[i])*m_weights_inv[i];
if (new_w > old_w) {
m_scored_weights[i] = new_w;
//m_heap.decreased(i);
}
else if (new_w < old_w) {
m_scored_weights[i] = new_w;
//m_heap.increased(i);
}
m_score_updated[i] = false;
}
}
}
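        // L2: degree-based lower bound. Visit elements in decreasing
        // score/weight order and charge each one (fractionally, n*wt/deg,
        // when its degree is at least the number of remaining uncovered
        // sets) until all uncovered sets are accounted for.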
rational L2() {
rational w(m_weight);
scoped_push _sc(*this);
int n = 0;
for (unsigned i = 0; i < m_T.size(); ++i) {
if (!has_selected(*m_T[i])) ++n;
}
update_heap();
value_lt lt(m_scored_weights);
std::sort(m_indices.begin(), m_indices.end(), lt);
for(unsigned i = 0; i < m_indices.size() && n > 0; ++i) {
// deg(c) = score(c)
// wt(c) = m_weights[c]
unsigned idx = m_indices[i];
if (m_scores[idx] == 0) {
break;
}
if (m_scores[idx] < static_cast<unsigned>(n) || m_weights[idx].is_one()) {
w += m_weights[idx];
}
else {
w += div((rational(n)*m_weights[idx]), rational(m_scores[idx]));
}
n -= m_scores[idx];
}
return w;
}
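        // L3: LP relaxation lower bound. Minimize the weight objective with
        // the simplex solver over the accumulated rows and round the optimum
        // up to the nearest integer.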
rational L3() {
TRACE("simplex", m_simplex.display(tout););
VERIFY(l_true == m_simplex.make_feasible());
TRACE("simplex", m_simplex.display(tout););
VERIFY(l_true == m_simplex.minimize(m_weights_var));
mpq_inf const& val = m_simplex.get_value(m_weights_var);
unsynch_mpq_inf_manager mg;
unsynch_mpq_manager& mq = mg.get_mpq_manager();
scoped_mpq c(mq);
mg.ceil(val, c);
rational w(c);
CTRACE("simplex",
w >= m_weight, tout << w << " " << m_weight << " !!!!\n";
display(tout););
SASSERT(w >= m_weight);
return w;
}
void add_simplex_row(bool is_some_true, unsigned sz, unsigned const* S) {
unsigned_vector vars;
scoped_mpz_vector coeffs(m);
for (unsigned i = 0; i < sz; ++i) {
vars.push_back(S[i]);
coeffs.push_back(mpz(1));
}
unsigned base_var = m_F.size() + m_T.size() + m_weights.size();
m_simplex.ensure_var(base_var);
vars.push_back(base_var);
coeffs.push_back(mpz(-1));
// S - base_var = 0
if (is_some_true) {
// base_var >= 1
m_simplex.set_lower(base_var, mpq_inf(mpq(1),mpq(0)));
}
else {
// base_var <= sz-1
m_simplex.set_upper(base_var, mpq_inf(mpq(sz-1),mpq(0)));
}
m_simplex.add_row(base_var, coeffs.size(), vars.c_ptr(), coeffs.c_ptr());
}
unsigned select_min(set const& S) {
unsigned result = S[0];
for (unsigned i = 1; i < S.size(); ++i) {
if (m_weights[result] > m_weights[S[i]]) {
result = S[i];
}
}
return result;
}
bool have_selected(lbool val, ptr_vector<set> const& Sets, unsigned& i) {
for (i = 0; i < Sets.size(); ++i) {
if (!has_selected(val, *Sets[i])) return false;
}
return true;
}
void set_undef_to_false() {
for (unsigned i = 0; i < m_model.size(); ++i) {
if (m_model[i] == l_undef) {
m_model[i] = l_false;
}
}
}
bool values_satisfy_Fs(unsigned& i) {
unsigned j = 0;
for (i = 0; i < m_F.size(); ++i) {
set const& F = *m_F[i];
for (j = 0; j < F.size(); ++j) {
if (m_model[F[j]] == l_false) {
break;
}
}
if (F.size() == j) {
break;
}
}
return i == m_F.size();
}
bool has_selected(set const& S) {
return has_selected(l_true, S);
}
bool has_unselected(set const& S) {
return has_selected(l_false, S);
}
bool has_unset(set const& S) {
return has_selected(l_undef, S);
}
bool has_selected(lbool val, set const& S) {
for (unsigned i = 0; i < S.size(); ++i) {
if (val == value(S[i])) {
return true;
}
}
return false;
}
// (greedy) CDCL learner for hitting sets.
inline unsigned scope_lvl() const { return m_scope_lvl; }
inline bool inconsistent() const { return m_inconsistent; }
inline bool canceled() const { return !m_limit.inc(); }
inline unsigned lvl(unsigned idx) const { return m_level[idx]; }
inline lbool value(unsigned idx) const { return m_value[idx]; }
inline bool is_marked(unsigned v) const { return m_mark[v] != 0; }
inline void mark(unsigned v) { SASSERT(!is_marked(v)); m_mark[v] = true; }
inline void reset_mark(unsigned v) { SASSERT(is_marked(v)); m_mark[v] = false; }
void push() {
SASSERT(!inconsistent());
++m_scope_lvl;
m_scopes.push_back(scope());
scope& s = m_scopes.back();
s.m_trail_lim = m_trail.size();
}
void pop(unsigned n) {
if (n > 0) {
m_inconsistent = false;
m_scope_lvl = scope_lvl() - n;
unassign(m_scopes[scope_lvl()].m_trail_lim);
m_scopes.shrink(scope_lvl());
}
}
void assign(unsigned idx, lbool val, justification const& justification) {
if (val == l_true) {
m_weight += m_weights[idx];
update_score(idx, false);
if (m_enable_simplex) {
m_simplex.set_lower(idx, mpq_inf(mpq(1),mpq(0)));
}
}
SASSERT(val != l_true || m_scores[idx] == 0);
m_value[idx] = val;
m_justification[idx] = justification;
m_trail.push_back(idx);
m_level[idx] = scope_lvl();
TRACE("opt", tout << idx << " := " << val << " scope: " << scope_lvl() << " w: " << m_weight << "\n";);
}
svector<unsigned> m_replay_idx;
svector<lbool> m_replay_val;
void unassign(unsigned sz) {
for (unsigned j = sz; j < m_trail.size(); ++j) {
unsigned idx = m_trail[j];
lbool val = value(idx);
m_value[idx] = l_undef;
if (val == l_true) {
m_weight -= m_weights[idx];
update_score(idx, true);
if (m_enable_simplex) {
m_simplex.set_lower(idx, mpq_inf(mpq(0),mpq(0)));
}
}
if (m_justification[idx].is_axiom()) {
m_replay_idx.push_back(idx);
m_replay_val.push_back(val);
}
}
TRACE("opt", tout << m_weight << "\n";);
m_trail.shrink(sz);
m_qhead = sz;
for (unsigned i = m_replay_idx.size(); i > 0; ) {
--i;
unsigned idx = m_replay_idx[i];
lbool val = m_replay_val[i];
assign(idx, val, justification(justification::AXIOM));
}
m_replay_idx.reset();
m_replay_val.reset();
}
lbool search() {
TRACE("opt", display(tout););
pop(scope_lvl());
while (true) {
while (true) {
propagate();
if (canceled()) return l_undef;
if (!inconsistent()) break;
if (!resolve_conflict()) return l_false;
SASSERT(!inconsistent());
}
if (!decide()) {
SASSERT(validate_model());
m_model.reset();
m_model.append(m_value);
m_upper = m_weight;
// SASSERT(m_weight < m_max_weight);
return l_true;
}
}
}
bool validate_model() {
for (unsigned i = 0; i < m_T.size(); ++i) {
set const& S = *m_T[i];
bool found = false;
for (unsigned j = 0; !found && j < S.size(); ++j) {
found = value(S[j]) == l_true;
}
CTRACE("opt", !found,
display(tout << "not found: " << i << "\n", S);
display(tout););
SASSERT(found);
}
for (unsigned i = 0; i < m_F.size(); ++i) {
set const& S = *m_F[i];
bool found = false;
for (unsigned j = 0; !found && j < S.size(); ++j) {
found = value(S[j]) != l_true;
}
CTRACE("opt", !found,
display(tout << "not found: " << i << "\n", S);
display(tout););
SASSERT(found);
}
return true;
}
bool invariant() {
for (unsigned i = 0; i < m_fwatch.size(); ++i) {
for (unsigned j = 0; j < m_fwatch[i].size(); ++j) {
set const& S = *m_F[m_fwatch[i][j]];
SASSERT(S[0] == i || S[1] == i);
}
}
for (unsigned i = 0; i < m_twatch.size(); ++i) {
for (unsigned j = 0; j < m_twatch[i].size(); ++j) {
set const& S = *m_T[m_twatch[i][j]];
SASSERT(S[0] == i || S[1] == i);
}
}
return true;
}
bool resolve_conflict() {
while (true) {
if (!resolve_conflict_core()) return false;
if (!inconsistent()) return true;
}
}
unsigned get_max_lvl(unsigned conflict_l, justification const& conflict_j) {
if (scope_lvl() == 0) return 0;
unsigned r = lvl(conflict_l);
if (conflict_j.is_clause()) {
unsigned clause = conflict_j.clause();
ptr_vector<set> const& S = conflict_j.pos()?m_T:m_F;
r = std::max(r, lvl((*S[clause])[0]));
r = std::max(r, lvl((*S[clause])[1]));
}
return r;
}
bool resolve_conflict_core() {
SASSERT(inconsistent());
TRACE("opt", display(tout););
unsigned conflict_l = m_conflict_l;
justification conflict_j(m_conflict_j);
if (conflict_j.is_axiom()) {
return false;
}
m_conflict_lvl = get_max_lvl(conflict_l, conflict_j);
if (m_conflict_lvl == 0) {
return false;
}
unsigned idx = skip_above_conflict_level();
unsigned num_marks = 0;
m_lemma.reset();
m_lemma.push_back(0);
process_antecedent(conflict_l, num_marks);
do {
TRACE("opt", tout << "conflict literal: " << conflict_l << "\n";
display(tout, conflict_j););
if (conflict_j.is_clause()) {
unsigned cl = conflict_j.clause();
unsigned i = 0;
SASSERT(value(conflict_l) != l_undef);
set const& T = conflict_j.pos()?(*m_T[cl]):(*m_F[cl]);
if (T[0] == conflict_l) {
i = 1;
}
else {
SASSERT(T[1] == conflict_l);
process_antecedent(T[0], num_marks);
i = 2;
}
unsigned sz = T.size();
for (; i < sz; ++i) {
process_antecedent(T[i], num_marks);
}
}
else if (conflict_j.is_decision()) {
--num_marks;
SASSERT(num_marks == 0);
break;
}
else if (conflict_j.is_axiom()) {
IF_VERBOSE(0, verbose_stream() << "axiom " << conflict_l << " " << value(conflict_l) << " " << num_marks << "\n";);
--num_marks;
SASSERT(num_marks == 0);
break;
}
while (true) {
unsigned l = m_trail[idx];
if (is_marked(l)) break;
SASSERT(idx > 0);
--idx;
}
conflict_l = m_trail[idx];
conflict_j = m_justification[conflict_l];
--idx;
--num_marks;
if (num_marks == 0 && value(conflict_l) == l_false) {
++num_marks;
}
reset_mark(conflict_l);
}
while (num_marks > 0);
m_lemma[0] = conflict_l;
TRACE("opt", display_lemma(tout););
SASSERT(value(conflict_l) == l_true);
unsigned new_scope_lvl = 0;
for (unsigned i = 1; i < m_lemma.size(); ++i) {
SASSERT(l_true == value(m_lemma[i]));
new_scope_lvl = std::max(new_scope_lvl, lvl(m_lemma[i]));
reset_mark(m_lemma[i]);
}
pop(scope_lvl() - new_scope_lvl);
SASSERT(l_undef == value(conflict_l));
justification j = add_exists_false(m_lemma.size(), m_lemma.c_ptr());
if (!j.is_axiom()) assign(conflict_l, l_false, j);
return true;
}
void process_antecedent(unsigned antecedent, unsigned& num_marks) {
unsigned alvl = lvl(antecedent);
SASSERT(alvl <= m_conflict_lvl);
if (!is_marked(antecedent) && alvl > 0 && !m_justification[antecedent].is_axiom()) {
mark(antecedent);
if (alvl == m_conflict_lvl || value(antecedent) == l_false) {
++num_marks;
}
else {
m_lemma.push_back(antecedent);
}
}
}
unsigned skip_above_conflict_level() {
unsigned idx = m_trail.size();
if (idx == 0) {
return idx;
}
idx--;
// skip literals from levels above the conflict level
while (lvl(m_trail[idx]) > m_conflict_lvl) {
SASSERT(idx > 0);
idx--;
}
return idx;
}
void set_conflict(unsigned idx, justification const& justification) {
if (!inconsistent()) {
TRACE("opt", tout << "conflict: " << idx << "\n";);
m_inconsistent = true;
m_conflict_j = justification;
m_conflict_l = idx;
}
}
unsigned next_var() {
update_heap();
value_lt lt(m_scored_weights);
std::sort(m_indices.begin(), m_indices.end(), lt);
unsigned idx = m_indices[0];
if (m_scores[idx] == 0) return UINT_MAX;
return idx;
#if 0
int min_val = m_heap.min_value();
if (min_val == -1) {
return UINT_MAX;
}
SASSERT(0 <= min_val && static_cast<unsigned>(min_val) < m_weights.size());
if (m_scores[min_val] == 0) {
return UINT_MAX;
}
return static_cast<unsigned>(min_val);
#endif
}
bool decide() {
unsigned idx = next_var();
if (idx == UINT_MAX) {
return false;
}
else {
push();
TRACE("opt", tout << "decide " << idx << "\n";);
assign(idx, l_true, justification(justification::DECISION));
return true;
}
}
void propagate() {
TRACE("opt", display(tout););
SASSERT(invariant());
while (m_qhead < m_trail.size() && !inconsistent() && !canceled()) {
unsigned idx = m_trail[m_qhead];
++m_qhead;
switch (value(idx)) {
case l_undef:
UNREACHABLE();
break;
case l_true:
propagate(idx, l_false, m_fwatch, m_F);
break;
case l_false:
propagate(idx, l_true, m_twatch, m_T);
break;
}
}
prune_branch();
}
void propagate(unsigned idx, lbool good_val, vector<unsigned_vector>& watch, ptr_vector<set>& Fs)
{
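            // Two-watch propagation sketch: positions F[0] and F[1] of each set are the
            // watched elements. When a watched element takes the "bad" value we try to
            // move the watch to another element that is not bad; if none exists, the other
            // watch is either assigned good_val (unit propagation) or, if it is already
            // bad, a conflict is recorded for this clause.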
TRACE("opt", tout << idx << " " << value(idx) << "\n";);
unsigned_vector& w = watch[idx];
unsigned sz = w.size();
lbool bad_val = ~good_val;
SASSERT(value(idx) == bad_val);
unsigned l = 0;
for (unsigned i = 0; i < sz && !canceled(); ++i, ++l) {
unsigned clause_id = w[i];
set& F = *Fs[clause_id];
SASSERT(F.size() >= 2);
bool k1 = (F[0] != idx);
bool k2 = !k1;
SASSERT(F[k1] == idx);
SASSERT(value(F[k1]) == bad_val);
if (value(F[k2]) == good_val) {
w[l] = w[i];
continue;
}
bool found = false;
unsigned sz2 = F.size();
for (unsigned j = 2; !found && j < sz2; ++j) {
unsigned idx2 = F[j];
if (value(idx2) != bad_val) {
found = true;
std::swap(F[k1], F[j]);
--l;
watch[idx2].push_back(clause_id);
}
}
if (!found) {
if (value(F[k2]) == bad_val) {
set_conflict(F[k2], justification(clause_id, good_val == l_true));
if (i == l) {
l = sz;
}
else {
for (; i < sz; ++i, ++l) {
w[l] = w[i];
}
}
break;
}
else {
SASSERT(value(F[k2]) == l_undef);
assign(F[k2], good_val, justification(clause_id, good_val == l_true));
w[l] = w[i];
}
}
}
watch[idx].shrink(l);
SASSERT(invariant());
TRACE("opt", tout << idx << " " << value(idx) << "\n";);
SASSERT(value(idx) == bad_val);
}
bool infeasible_lookahead() {
if (m_enable_simplex && L3() >= m_max_weight) {
return true;
}
return
(L1() >= m_max_weight) ||
(L2() >= m_max_weight);
}
void prune_branch() {
if (inconsistent() || !infeasible_lookahead()) {
return;
}
IF_VERBOSE(4, verbose_stream() << "(hs.prune-branch " << m_weight << ")\n";);
m_lemma.reset();
unsigned i = 0;
rational w(0);
for (; i < m_trail.size() && w < m_max_weight; ++i) {
unsigned idx = m_trail[i];
if (m_justification[idx].is_decision()) {
SASSERT(value(idx) == l_true);
m_lemma.push_back(idx);
w += m_weights[idx];
}
}
// undo the lower bounds.
TRACE("opt",
tout << "prune branch: " << m_weight << " ";
display_lemma(tout);
display(tout);
);
justification j = add_exists_false(m_lemma.size(), m_lemma.c_ptr());
unsigned idx = m_lemma.empty()?0:m_lemma[0];
set_conflict(idx, j);
}
// TBD: derive strong inequalities and add them to Simplex.
// x_i1 + .. + x_ik >= k-1 for each subset k from set n: x_1 + .. + x_n >= k
};
hitting_sets::hitting_sets(reslimit& lim) { m_imp = alloc(imp, lim); }
hitting_sets::~hitting_sets() { dealloc(m_imp); }
void hitting_sets::add_weight(rational const& w) { m_imp->add_weight(w); }
void hitting_sets::add_exists_true(unsigned sz, unsigned const* elems) { m_imp->add_exists_true(sz, elems); }
void hitting_sets::add_exists_false(unsigned sz, unsigned const* elems) { m_imp->add_exists_false(sz, elems); }
lbool hitting_sets::compute_lower() { return m_imp->compute_lower(); }
lbool hitting_sets::compute_upper() { return m_imp->compute_upper(); }
rational hitting_sets::get_lower() { return m_imp->get_lower(); }
rational hitting_sets::get_upper() { return m_imp->get_upper(); }
void hitting_sets::set_upper(rational const& r) { return m_imp->set_upper(r); }
bool hitting_sets::get_value(unsigned idx) { return m_imp->get_value(idx); }
void hitting_sets::collect_statistics(::statistics& st) const { m_imp->collect_statistics(st); }
void hitting_sets::reset() { m_imp->reset(); }
};<|fim▁end|> | idx < m_model.size() &&
m_model[idx] == l_true; |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from .main import IMDB
def start():
return IMDB()
config = [{
'name': 'imdb',
'groups': [
{
'tab': 'automation',
'name': 'imdb_automation',
'label': 'IMDB',
'description': 'From any <strong>public</strong> IMDB watchlists. Url should be the RSS link.',
'options': [
{
'name': 'automation_enabled',
'default': False,
'type': 'enabler',
},
{
'name': 'automation_urls_use',
'label': 'Use',
},
{<|fim▁hole|> 'label': 'url',
'type': 'combined',
'combine': ['automation_urls_use', 'automation_urls'],
},
],
},
],
}]<|fim▁end|> | 'name': 'automation_urls', |
<|file_name|>sonarr_list.py<|end_file_name|><|fim▁begin|>from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import urlparse
import json
import logging
from collections import MutableSet
import requests
from requests import RequestException
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
log = logging.getLogger('sonarr_list')
class SonarrSet(MutableSet):
supported_ids = ['tvdb_id', 'tvrage_id', 'tvmaze_id', 'imdb_id', 'slug', 'sonarr_id']
schema = {
'type': 'object',
'properties': {
'base_url': {'type': 'string'},
'port': {'type': 'number', 'default': 80},
'api_key': {'type': 'string'},
'include_ended': {'type': 'boolean', 'default': True},
'only_monitored': {'type': 'boolean', 'default': True},
'include_data': {'type': 'boolean', 'default': False}
},
'required': ['api_key', 'base_url'],
'additionalProperties': False
}
def series_request_builder(self, base_url, port, api_key):
parsedurl = urlparse(base_url)
log.debug('Received series list request')
url = '%s://%s:%s%s/api/series' % (parsedurl.scheme, parsedurl.netloc, port, parsedurl.path)
headers = {'X-Api-Key': api_key}
return url, headers
def lookup_request(self, base_url, port, api_key):
parsedurl = urlparse(base_url)
log.debug('Received series lookup request')
url = '%s://%s:%s%s/api/series/lookup?term=' % (parsedurl.scheme, parsedurl.netloc, port, parsedurl.path)
headers = {'X-Api-Key': api_key}
return url, headers
def profile_list_request(self, base_url, port, api_key):
parsedurl = urlparse(base_url)
log.debug('Received profile list request')
url = '%s://%s:%s%s/api/profile' % (parsedurl.scheme, parsedurl.netloc, port, parsedurl.path)
headers = {'X-Api-Key': api_key}
return url, headers
def rootfolder_request(self, base_url, port, api_key):
parsedurl = urlparse(base_url)
log.debug('Received rootfolder list request')
url = '%s://%s:%s%s/api/Rootfolder' % (parsedurl.scheme, parsedurl.netloc, port, parsedurl.path)
headers = {'X-Api-Key': api_key}
return url, headers
def get_json(self, url, headers):
try:
response = requests.get(url, headers=headers)
if response.status_code == 200:
return response.json()
else:
raise plugin.PluginError('Invalid response received from Sonarr: %s' % response.content)
except RequestException as e:
raise plugin.PluginError('Unable to connect to Sonarr at %s. Error: %s' % (url, e))
def post_json(self, url, headers, data):
try:
response = requests.post(url, headers=headers, data=data)
if response.status_code == 201:
return response.json()
else:
raise plugin.PluginError('Invalid response received from Sonarr: %s' % response.content)
except RequestException as e:
raise plugin.PluginError('Unable to connect to Sonarr at %s. Error: %s' % (url, e))
def request_builder(self, base_url, request_type, port, api_key):
if request_type == 'series':
return self.series_request_builder(base_url, port, api_key)
elif request_type == 'profile':
return self.profile_list_request(base_url, port, api_key)
elif request_type == 'lookup':
return self.lookup_request(base_url, port, api_key)
elif request_type == 'rootfolder':
return self.rootfolder_request(base_url, port, api_key)
else:
raise plugin.PluginError('Received unknown API request, aborting.')
def translate_quality(self, quality_name):
"""
Translate Sonnar's qualities to ones recognize by Flexget
"""
if quality_name == 'Raw-HD': # No better match yet in Flexget
return 'remux'
elif quality_name == 'DVD': # No better match yet in Flexget
return 'dvdrip'
else:
return quality_name.replace('-', ' ').lower()
def quality_requirement_builder(self, quality_profile):
allowed_qualities = [self.translate_quality(quality['quality']['name']) for quality in quality_profile['items']
if quality['allowed']]
cutoff = self.translate_quality(quality_profile['cutoff']['name'])
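        # Illustrative sketch with a hypothetical profile payload:
        #   {'cutoff': {'name': 'HDTV-720p'},
        #    'items': [{'quality': {'name': 'HDTV-720p'}, 'allowed': True},
        #              {'quality': {'name': 'DVD'}, 'allowed': False}]}
        # would yield allowed_qualities == ['hdtv 720p'] and cutoff == 'hdtv 720p'.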
return allowed_qualities, cutoff
def list_entries(self):
series_url, series_headers = self.request_builder(self.config.get('base_url'), 'series',
self.config.get('port'), self.config['api_key'])
json = self.get_json(series_url, series_headers)
# Retrieves Sonarr's profile list if include_data is set to true
if self.config.get('include_data'):
profile_url, profile_headers = self.request_builder(self.config.get('base_url'), 'profile',
self.config.get('port'),
self.config['api_key'])
profiles_json = self.get_json(profile_url, profile_headers)
entries = []
for show in json:
fg_qualities = '' # Initializes the quality parameter
fg_cutoff = ''
path = None
if not show['monitored'] and self.config.get(
'only_monitored'): # Checks if to retrieve just monitored shows
continue
if show['status'] == 'ended' and not self.config.get('include_ended'): # Checks if to retrieve ended shows
continue
if self.config.get('include_data') and profiles_json: # Check if to retrieve quality & path
path = show.get('path')
for profile in profiles_json:
if profile['id'] == show['profileId']: # Get show's profile data from all possible profiles
fg_qualities, fg_cutoff = self.quality_requirement_builder(profile)
entry = Entry(title=show['title'],
url='',
series_name=show['title'],
tvdb_id=show.get('tvdbId'),
tvrage_id=show.get('tvRageId'),
tvmaze_id=show.get('tvMazeId'),
                          imdb_id=show.get('imdbId'),
slug=show.get('titleSlug'),
sonarr_id=show.get('id'),
configure_series_target=fg_cutoff)
if len(fg_qualities) > 1:
entry['configure_series_qualities'] = fg_qualities
elif len(fg_qualities) == 1:
entry['configure_series_quality'] = fg_qualities[0]
else:
entry['configure_series_quality'] = fg_qualities
if path:
entry['configure_series_path'] = path
if entry.isvalid():
log.debug('returning entry %s', entry)
entries.append(entry)
else:
log.error('Invalid entry created? %s' % entry)
continue
return entries
def add_show(self, entry):
log.debug('searching for show match for %s using Sonarr', entry)
lookup_series_url, lookup_series_headers = self.request_builder(self.config.get('base_url'), 'lookup',
self.config.get('port'), self.config['api_key'])
if entry.get('tvdb_id'):
lookup_series_url += 'tvdb:%s' % entry.get('tvdb_id')
else:
lookup_series_url += entry.get('title')
lookup_results = self.get_json(lookup_series_url, headers=lookup_series_headers)
if not lookup_results:
log.debug('could not find series match to %s', entry)
return
else:
if len(lookup_results) > 1:
log.debug('got multiple results for Sonarr, using first one')
show = lookup_results[0]
log.debug('using show %s', show)
# Getting rootfolder
rootfolder_series_url, rootfolder_series_headers = self.request_builder(self.config.get('base_url'),
'rootfolder', self.config.get('port'),
self.config['api_key'])
rootfolder = self.get_json(rootfolder_series_url, headers=rootfolder_series_headers)
# Setting defaults for Sonarr
show['profileId'] = 1
        show['qualityProfileId'] = 1
show['rootFolderPath'] = rootfolder[0]['path']
series_url, series_headers = self.request_builder(self.config.get('base_url'), 'series',
self.config.get('port'), self.config['api_key'])
log.debug('adding show %s to sonarr', show)
returned_show = self.post_json(series_url, headers=series_headers, data=json.dumps(show))
return returned_show
def remove_show(self, show):
delete_series_url, delete_series_headers = self.request_builder(self.config.get('base_url'), 'series',
self.config.get('port'), self.config['api_key'])
delete_series_url += '/%s' % show.get('sonarr_id')
requests.delete(delete_series_url, headers=delete_series_headers)
@property
def shows(self):
if self._shows is None:
self._shows = self.list_entries()
return self._shows
def _find_entry(self, entry):
for sb_entry in self.shows:
if any(entry.get(id) is not None and entry[id] == sb_entry[id] for id in self.supported_ids):
return sb_entry<|fim▁hole|> return sb_entry
def _from_iterable(self, it):
# TODO: is this the right answer? the returned object won't have our custom __contains__ logic
return set(it)
def __init__(self, config):
self.config = config
self._shows = None
def __iter__(self):
return (entry for entry in self.shows)
def __len__(self):
return len(self.shows)
def __contains__(self, entry):
return self._find_entry(entry) is not None
def add(self, entry):
if not self._find_entry(entry):
show = self.add_show(entry)
self._shows = None
log.verbose('Successfully added show %s to Sonarr', show['title'])
else:
log.debug('entry %s already exists in Sonarr list', entry)
def discard(self, entry):
show = self._find_entry(entry)
if not show:
log.debug('Did not find matching show in Sonarr for %s, skipping', entry)
return
self.remove_show(show)
log.verbose('removed show %s from Sonarr', show['title'])
@property
def immutable(self):
return False
@property
def online(self):
""" Set the online status of the plugin, online plugin should be treated differently in certain situations,
like test mode"""
return True
def get(self, entry):
return self._find_entry(entry)
class SonarrList(object):
schema = SonarrSet.schema
@staticmethod
def get_list(config):
return SonarrSet(config)
def on_task_input(self, task, config):
return list(SonarrSet(config))
@event('plugin.register')
def register_plugin():
plugin.register(SonarrList, 'sonarr_list', api_ver=2, groups=['list'])<|fim▁end|> | if entry.get('title').lower() == sb_entry.get('title').lower(): |
<|file_name|>manifest.py<|end_file_name|><|fim▁begin|>import io
import os
import sys
from atomicwrites import atomic_write
from copy import deepcopy
from multiprocessing import Pool, cpu_count
from six import ensure_text
from . import jsonlib
from . import vcs
from .item import (ConformanceCheckerTest,
CrashTest,
ManifestItem,
ManualTest,
PrintRefTest,
RefTest,
SupportFile,
TestharnessTest,
VisualTest,
WebDriverSpecTest)
from .log import get_logger
from .sourcefile import SourceFile
from .typedata import TypeData
MYPY = False
if MYPY:
# MYPY is set to True when run under Mypy.
from logging import Logger
from typing import Any
from typing import Container
from typing import Dict
from typing import IO
from typing import Iterator
from typing import Iterable
from typing import Optional
from typing import Set
from typing import Text
from typing import Tuple
from typing import Type
from typing import Union
CURRENT_VERSION = 8 # type: int
class ManifestError(Exception):
pass
class ManifestVersionMismatch(ManifestError):
pass
class InvalidCacheError(Exception):
pass
item_classes = {u"testharness": TestharnessTest,
u"reftest": RefTest,
u"print-reftest": PrintRefTest,
u"crashtest": CrashTest,
u"manual": ManualTest,
u"wdspec": WebDriverSpecTest,
u"conformancechecker": ConformanceCheckerTest,
u"visual": VisualTest,
u"support": SupportFile} # type: Dict[Text, Type[ManifestItem]]
def compute_manifest_items(source_file):
# type: (SourceFile) -> Tuple[Tuple[Text, ...], Text, Set[ManifestItem], Text]
rel_path_parts = source_file.rel_path_parts
new_type, manifest_items = source_file.manifest_items()
file_hash = source_file.hash
return rel_path_parts, new_type, set(manifest_items), file_hash
if MYPY:
ManifestDataType = Dict[Any, TypeData]
else:
ManifestDataType = dict
class ManifestData(ManifestDataType):
def __init__(self, manifest):
# type: (Manifest) -> None
"""Dictionary subclass containing a TypeData instance for each test type,
keyed by type name"""
self.initialized = False # type: bool
for key, value in item_classes.items():
self[key] = TypeData(manifest, value)
self.initialized = True
self.json_obj = None # type: None
def __setitem__(self, key, value):
# type: (Text, TypeData) -> None
if self.initialized:
raise AttributeError
dict.__setitem__(self, key, value)
<|fim▁hole|> # type: () -> Set[Text]
"""Get a list of all paths containing test items
without actually constructing all the items"""
rv = set() # type: Set[Text]
for item_data in self.values():
for item in item_data:
rv.add(os.path.sep.join(item))
return rv
def type_by_path(self):
# type: () -> Dict[Tuple[Text, ...], Text]
rv = {}
for item_type, item_data in self.items():
for item in item_data:
rv[item] = item_type
return rv
class Manifest(object):
def __init__(self, tests_root, url_base="/"):
# type: (Text, Text) -> None
assert url_base is not None
self._data = ManifestData(self) # type: ManifestData
self.tests_root = tests_root # type: Text
self.url_base = url_base # type: Text
def __iter__(self):
# type: () -> Iterator[Tuple[Text, Text, Set[ManifestItem]]]
return self.itertypes()
def itertypes(self, *types):
# type: (*Text) -> Iterator[Tuple[Text, Text, Set[ManifestItem]]]
for item_type in (types or sorted(self._data.keys())):
for path in self._data[item_type]:
rel_path = os.sep.join(path)
tests = self._data[item_type][path]
yield item_type, rel_path, tests
def iterpath(self, path):
# type: (Text) -> Iterable[ManifestItem]
tpath = tuple(path.split(os.path.sep))
for type_tests in self._data.values():
i = type_tests.get(tpath, set())
assert i is not None
for test in i:
yield test
def iterdir(self, dir_name):
# type: (Text) -> Iterable[ManifestItem]
tpath = tuple(dir_name.split(os.path.sep))
tpath_len = len(tpath)
for type_tests in self._data.values():
for path, tests in type_tests.items():
if path[:tpath_len] == tpath:
for test in tests:
yield test
def update(self, tree, parallel=True):
# type: (Iterable[Tuple[Text, Optional[Text], bool]], bool) -> bool
"""Update the manifest given an iterable of items that make up the updated manifest.
The iterable must either generate tuples of the form (SourceFile, True) for paths
that are to be updated, or (path, False) for items that are not to be updated. This
unusual API is designed as an optimistaion meaning that SourceFile items need not be
constructed in the case we are not updating a path, but the absence of an item from
the iterator may be used to remove defunct entries from the manifest."""
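        # Illustrative (hypothetical) entries yielded by the tree iterable:
        #   ("css/new-test.html", "0123abcd", True)    # new or changed: recompute its items
        #   ("css/stable-test.html", None, False)      # unchanged: keep the cached entry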
logger = get_logger()
changed = False
# Create local variable references to these dicts so we avoid the
# attribute access in the hot loop below
data = self._data
types = data.type_by_path()
remaining_manifest_paths = set(types)
to_update = []
for path, file_hash, updated in tree:
path_parts = tuple(path.split(os.path.sep))
is_new = path_parts not in remaining_manifest_paths
if not updated and is_new:
# This is kind of a bandaid; if we ended up here the cache
# was invalid but we've been using it anyway. That's obviously
# bad; we should fix the underlying issue that we sometimes
# use an invalid cache. But at least this fixes the immediate
# problem
raise InvalidCacheError
if not updated:
remaining_manifest_paths.remove(path_parts)
else:
assert self.tests_root is not None
source_file = SourceFile(self.tests_root,
path,
self.url_base,
file_hash)
hash_changed = False # type: bool
if not is_new:
if file_hash is None:
file_hash = source_file.hash
remaining_manifest_paths.remove(path_parts)
old_type = types[path_parts]
old_hash = data[old_type].hashes[path_parts]
if old_hash != file_hash:
hash_changed = True
del data[old_type][path_parts]
if is_new or hash_changed:
to_update.append(source_file)
if to_update:
logger.debug("Computing manifest update for %s items" % len(to_update))
changed = True
# 25 items was derived experimentally (2020-01) to be approximately the
# point at which it is quicker to create a Pool and parallelize update.
pool = None
if parallel and len(to_update) > 25 and cpu_count() > 1:
# On Python 3 on Windows, using >= MAXIMUM_WAIT_OBJECTS processes
# causes a crash in the multiprocessing module. Whilst this enum
# can technically have any value, it is usually 64. For safety,
# restrict manifest regeneration to 48 processes on Windows.
#
# See https://bugs.python.org/issue26903 and https://bugs.python.org/issue40263
processes = cpu_count()
if sys.platform == "win32" and processes > 48:
processes = 48
pool = Pool(processes)
# chunksize set > 1 when more than 10000 tests, because
# chunking is a net-gain once we get to very large numbers
# of items (again, experimentally, 2020-01)
chunksize = max(1, len(to_update) // 10000)
logger.debug("Doing a multiprocessed update. CPU count: %s, "
"processes: %s, chunksize: %s" % (cpu_count(), processes, chunksize))
results = pool.imap_unordered(compute_manifest_items,
to_update,
chunksize=chunksize
) # type: Iterator[Tuple[Tuple[Text, ...], Text, Set[ManifestItem], Text]]
else:
results = map(compute_manifest_items, to_update)
for result in results:
rel_path_parts, new_type, manifest_items, file_hash = result
data[new_type][rel_path_parts] = manifest_items
data[new_type].hashes[rel_path_parts] = file_hash
# Make sure to terminate the Pool, to avoid hangs on Python 3.
# https://docs.python.org/3/library/multiprocessing.html#multiprocessing.pool.Pool
if pool is not None:
pool.terminate()
if remaining_manifest_paths:
changed = True
for rel_path_parts in remaining_manifest_paths:
for test_data in data.values():
if rel_path_parts in test_data:
del test_data[rel_path_parts]
return changed
def to_json(self, caller_owns_obj=True):
# type: (bool) -> Dict[Text, Any]
"""Dump a manifest into a object which can be serialized as JSON
If caller_owns_obj is False, then the return value remains
owned by the manifest; it is _vitally important_ that _no_
(even read) operation is done on the manifest, as otherwise
objects within the object graph rooted at the return value can
        be mutated. This essentially makes this mode very dangerous,
        and it should only be used with extreme care.
"""
out_items = {
test_type: type_paths.to_json()
for test_type, type_paths in self._data.items() if type_paths
}
if caller_owns_obj:
out_items = deepcopy(out_items)
rv = {"url_base": self.url_base,
"items": out_items,
"version": CURRENT_VERSION} # type: Dict[Text, Any]
return rv
@classmethod
def from_json(cls, tests_root, obj, types=None, callee_owns_obj=False):
# type: (Text, Dict[Text, Any], Optional[Container[Text]], bool) -> Manifest
"""Load a manifest from a JSON object
This loads a manifest for a given local test_root path from an
object obj, potentially partially loading it to only load the
types given by types.
If callee_owns_obj is True, then ownership of obj transfers
to this function when called, and the caller must never mutate
the obj or anything referred to in the object graph rooted at
obj.
"""
version = obj.get("version")
if version != CURRENT_VERSION:
raise ManifestVersionMismatch
self = cls(tests_root, url_base=obj.get("url_base", "/"))
if not hasattr(obj, "items"):
raise ManifestError
for test_type, type_paths in obj["items"].items():
if test_type not in item_classes:
raise ManifestError
if types and test_type not in types:
continue
if not callee_owns_obj:
type_paths = deepcopy(type_paths)
self._data[test_type].set_json(type_paths)
return self
def load(tests_root, manifest, types=None):
# type: (Text, Union[IO[bytes], Text], Optional[Container[Text]]) -> Optional[Manifest]
logger = get_logger()
logger.warning("Prefer load_and_update instead")
return _load(logger, tests_root, manifest, types)
__load_cache = {} # type: Dict[Text, Manifest]
def _load(logger, # type: Logger
tests_root, # type: Text
manifest, # type: Union[IO[bytes], Text]
types=None, # type: Optional[Container[Text]]
allow_cached=True # type: bool
):
# type: (...) -> Optional[Manifest]
manifest_path = (manifest if isinstance(manifest, str)
else manifest.name)
if allow_cached and manifest_path in __load_cache:
return __load_cache[manifest_path]
if isinstance(manifest, str):
if os.path.exists(manifest):
logger.debug("Opening manifest at %s" % manifest)
else:
logger.debug("Creating new manifest at %s" % manifest)
try:
with io.open(manifest, "r", encoding="utf-8") as f:
rv = Manifest.from_json(tests_root,
jsonlib.load(f),
types=types,
callee_owns_obj=True)
except IOError:
return None
except ValueError:
logger.warning("%r may be corrupted", manifest)
return None
else:
rv = Manifest.from_json(tests_root,
jsonlib.load(manifest),
types=types,
callee_owns_obj=True)
if allow_cached:
__load_cache[manifest_path] = rv
return rv
def load_and_update(tests_root, # type: Union[Text, bytes]
manifest_path, # type: Union[Text, bytes]
url_base, # type: Text
update=True, # type: bool
rebuild=False, # type: bool
metadata_path=None, # type: Optional[Union[Text, bytes]]
cache_root=None, # type: Optional[Union[Text, bytes]]
working_copy=True, # type: bool
types=None, # type: Optional[Container[Text]]
write_manifest=True, # type: bool
allow_cached=True, # type: bool
parallel=True # type: bool
):
# type: (...) -> Manifest
# This function is now a facade for the purposes of type conversion, so that
# the external API can accept paths as text or (utf8) bytes, but internal
# functions always use Text.
metadata_path_text = ensure_text(metadata_path) if metadata_path is not None else None
cache_root_text = ensure_text(cache_root) if cache_root is not None else None
return _load_and_update(ensure_text(tests_root),
ensure_text(manifest_path),
url_base,
update=update,
rebuild=rebuild,
metadata_path=metadata_path_text,
cache_root=cache_root_text,
working_copy=working_copy,
types=types,
write_manifest=write_manifest,
allow_cached=allow_cached,
parallel=parallel)
def _load_and_update(tests_root, # type: Text
manifest_path, # type: Text
url_base, # type: Text
update=True, # type: bool
rebuild=False, # type: bool
metadata_path=None, # type: Optional[Text]
cache_root=None, # type: Optional[Text]
working_copy=True, # type: bool
types=None, # type: Optional[Container[Text]]
write_manifest=True, # type: bool
allow_cached=True, # type: bool
parallel=True # type: bool
):
# type: (...) -> Manifest
logger = get_logger()
manifest = None
if not rebuild:
try:
manifest = _load(logger,
tests_root,
manifest_path,
types=types,
allow_cached=allow_cached)
except ManifestVersionMismatch:
logger.info("Manifest version changed, rebuilding")
except ManifestError:
logger.warning("Failed to load manifest, rebuilding")
if manifest is not None and manifest.url_base != url_base:
logger.info("Manifest url base did not match, rebuilding")
manifest = None
if manifest is None:
manifest = Manifest(tests_root, url_base)
rebuild = True
update = True
if rebuild or update:
logger.info("Updating manifest")
for retry in range(2):
try:
tree = vcs.get_tree(tests_root, manifest, manifest_path, cache_root,
working_copy, rebuild)
changed = manifest.update(tree, parallel)
break
except InvalidCacheError:
logger.warning("Manifest cache was invalid, doing a complete rebuild")
rebuild = True
else:
# If we didn't break there was an error
raise
if write_manifest and changed:
write(manifest, manifest_path)
tree.dump_caches()
return manifest
def write(manifest, manifest_path):
# type: (Manifest, Text) -> None
dir_name = os.path.dirname(manifest_path)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
with atomic_write(manifest_path, overwrite=True) as f:
# Use ',' instead of the default ', ' separator to prevent trailing
# spaces: https://docs.python.org/2/library/json.html#json.dump
jsonlib.dump_dist(manifest.to_json(caller_owns_obj=True), f)
f.write("\n")<|fim▁end|> | def paths(self): |
<|file_name|>import3.rs<|end_file_name|><|fim▁begin|>// error-pattern: unresolved
use main::bar;
<|fim▁hole|><|fim▁end|> | fn main() { println!("foo"); } |
<|file_name|>dimensions.py<|end_file_name|><|fim▁begin|># encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from collections import Mapping
import mo_dots as dot
from mo_math import SUM
from pyLibrary.queries.containers import Container
from pyLibrary.queries.domains import Domain, ALGEBRAIC, KNOWN
from mo_dots import Null, coalesce, join_field, split_field, Data
from mo_dots.lists import FlatList
from mo_times.timer import Timer
from mo_logs import Log
from mo_dots import wrap, listwrap
DEFAULT_QUERY_LIMIT = 20
class Dimension(Container):
__slots__ = ["name", "full_name", "where", "type", "limit", "index", "parent", "edges", "partitions", "fields"]
def __init__(self, dim, parent, jx):
dim = wrap(dim)
self.name = dim.name
self.parent = coalesce(parent)
self.full_name = join_field(split_field(self.parent.full_name)+[self.name])
self.edges = None # FOR NOW
dot.set_default(self, dim)
self.where = dim.where
self.type = coalesce(dim.type, "set")
self.limit = coalesce(dim.limit, DEFAULT_QUERY_LIMIT)
self.index = coalesce(dim.index, coalesce(parent, Null).index, jx.settings.index)
if not self.index:
Log.error("Expecting an index name")
# ALLOW ACCESS TO SUB-PART BY NAME (IF ONLY THERE IS NO NAME COLLISION)
self.edges = Data()
for e in listwrap(dim.edges):
new_e = Dimension(e, self, jx)
self.edges[new_e.full_name] = new_e
self.partitions = wrap(coalesce(dim.partitions, []))
parse_partition(self)
fields = coalesce(dim.field, dim.fields)
if not fields:
return # NO FIELDS TO SEARCH
elif isinstance(fields, Mapping):
self.fields = wrap(fields)
edges = wrap([{"name": k, "value": v, "allowNulls": False} for k, v in self.fields.items()])
else:
self.fields = listwrap(fields)
edges = wrap([{"name": f, "value": f, "index": i, "allowNulls": False} for i, f in enumerate(self.fields)])
if dim.partitions:
return # ALREADY HAVE PARTS
if self.type not in KNOWN - ALGEBRAIC:
return # PARTS OR TOO FUZZY (OR TOO NUMEROUS) TO FETCH
jx.get_columns()
with Timer("Get parts of {{name}}", {"name": self.name}):
parts = jx.query({
"from": self.index,
"select": {"name": "count", "aggregate": "count"},
"edges": edges,
"where": self.where,
"limit": self.limit
})
Log.note("{{name}} has {{num}} parts", name= self.name, num= len(parts))
d = parts.edges[0].domain
if dim.path:
if len(edges) > 1:
Log.error("Not supported yet")
# EACH TERM RETURNED IS A PATH INTO A PARTITION TREE
temp = Data(partitions=[])
for i, count in enumerate(parts):
a = dim.path(d.getEnd(d.partitions[i]))
if not isinstance(a, list):
Log.error("The path function on " + dim.name + " must return an ARRAY of parts")
addParts(
temp,
dim.path(d.getEnd(d.partitions[i])),
count,
0
)
self.value = coalesce(dim.value, "name")
self.partitions = temp.partitions
elif isinstance(fields, Mapping):
self.value = "name" # USE THE "name" ATTRIBUTE OF PARTS
<|fim▁hole|> if p:
partitions.append({
"value": g,
"where": {"and": [
{"term": {e.value: g[e.name]}}
for e in edges
]},
"count": int(p)
})
self.partitions = partitions
elif len(edges) == 1:
self.value = "name" # USE THE "name" ATTRIBUTE OF PARTS
# SIMPLE LIST OF PARTS RETURNED, BE SURE TO INTERRELATE THEM
self.partitions = wrap([
{
"name": str(d.partitions[i].name), # CONVERT TO STRING
"value": d.getEnd(d.partitions[i]),
"where": {"term": {edges[0].value: d.partitions[i].value}},
"count": count
}
for i, count in enumerate(parts)
])
self.order = {p.value: i for i, p in enumerate(self.partitions)}
elif len(edges) == 2:
self.value = "name" # USE THE "name" ATTRIBUTE OF PARTS
d2 = parts.edges[1].domain
# SIMPLE LIST OF PARTS RETURNED, BE SURE TO INTERRELATE THEM
array = parts.data.values()[0].cube # DIG DEEP INTO RESULT (ASSUME SINGLE VALUE CUBE, WITH NULL AT END)
def edges2value(*values):
if isinstance(fields, Mapping):
output = Data()
for e, v in zip(edges, values):
output[e.name] = v
return output
else:
return tuple(values)
self.partitions = wrap([
{
"name": str(d.partitions[i].name), # CONVERT TO STRING
"value": d.getEnd(d.partitions[i]),
"where": {"term": {edges[0].value: d.partitions[i].value}},
"count": SUM(subcube),
"partitions": [
{
"name": str(d2.partitions[j].name), # CONVERT TO STRING
"value": edges2value(d.getEnd(d.partitions[i]), d2.getEnd(d2.partitions[j])),
"where": {"and": [
{"term": {edges[0].value: d.partitions[i].value}},
{"term": {edges[1].value: d2.partitions[j].value}}
]},
"count": count2
}
for j, count2 in enumerate(subcube)
if count2 > 0 # ONLY INCLUDE PROPERTIES THAT EXIST
]
}
for i, subcube in enumerate(array)
])
else:
Log.error("Not supported")
parse_partition(self) # RELATE THE PARTS TO THE PARENTS
def __getitem__(self, item):
return self.__getattr__(item)
def __getattr__(self, key):
"""
RETURN CHILD EDGE OR PARTITION BY NAME
"""
#TODO: IGNORE THE STANDARD DIMENSION PROPERTIES TO AVOID ACCIDENTAL SELECTION OF EDGE OR PART
if key in Dimension.__slots__:
return None
e = self.edges[key]
if e:
return e
for p in self.partitions:
if p.name == key:
return p
return Null
def getDomain(self, **kwargs):
# kwargs.depth IS MEANT TO REACH INTO SUB-PARTITIONS
kwargs = wrap(kwargs)
kwargs.depth = coalesce(kwargs.depth, len(self.fields)-1 if isinstance(self.fields, list) else None)
if not self.partitions and self.edges:
# USE EACH EDGE AS A PARTITION, BUT isFacet==True SO IT ALLOWS THE OVERLAP
partitions = [
{
"name": v.name,
"value": v.name,
"where": v.where,
"style": v.style,
"weight": v.weight # YO! WHAT DO WE *NOT* COPY?
}
for i, v in enumerate(self.edges)
if i < coalesce(self.limit, DEFAULT_QUERY_LIMIT) and v.where
]
self.isFacet = True
elif kwargs.depth == None: # ASSUME self.fields IS A dict
partitions = FlatList()
for i, part in enumerate(self.partitions):
if i >= coalesce(self.limit, DEFAULT_QUERY_LIMIT):
break
partitions.append({
"name":part.name,
"value":part.value,
"where":part.where,
"style":coalesce(part.style, part.parent.style),
"weight":part.weight # YO! WHAT DO WE *NOT* COPY?
})
elif kwargs.depth == 0:
partitions = [
{
"name":v.name,
"value":v.value,
"where":v.where,
"style":v.style,
"weight":v.weight # YO! WHAT DO WE *NOT* COPY?
}
for i, v in enumerate(self.partitions)
if i < coalesce(self.limit, DEFAULT_QUERY_LIMIT)]
elif kwargs.depth == 1:
partitions = FlatList()
rownum = 0
for i, part in enumerate(self.partitions):
if i >= coalesce(self.limit, DEFAULT_QUERY_LIMIT):
continue
rownum += 1
try:
for j, subpart in enumerate(part.partitions):
partitions.append({
"name":join_field(split_field(subpart.parent.name) + [subpart.name]),
"value":subpart.value,
"where":subpart.where,
"style":coalesce(subpart.style, subpart.parent.style),
"weight":subpart.weight # YO! WHAT DO WE *NOT* COPY?
})
except Exception as e:
Log.error("", e)
else:
Log.error("deeper than 2 is not supported yet")
return Domain(
type=self.type,
name=self.name,
partitions=wrap(partitions),
min=self.min,
max=self.max,
interval=self.interval,
# THE COMPLICATION IS THAT SOMETIMES WE WANT SIMPLE PARTITIONS, LIKE
# STRINGS, DATES, OR NUMBERS. OTHER TIMES WE WANT PARTITION OBJECTS
# WITH NAME, VALUE, AND OTHER MARKUP.
# USUALLY A "set" IS MEANT TO BE SIMPLE, BUT THE end() FUNCTION IS
# OVERRIDES EVERYTHING AND IS EXPLICIT. - NOT A GOOD SOLUTION BECAUSE
# end() IS USED BOTH TO INDICATE THE QUERY PARTITIONS *AND* DISPLAY
# COORDINATES ON CHARTS
# PLEASE SPLIT end() INTO value() (replacing the string value) AND
# label() (for presentation)
value="name" if not self.value and self.partitions else self.value,
key="value",
label=coalesce(self.label, (self.type == "set" and self.name)),
end=coalesce(self.end, (self.type == "set" and self.name)),
isFacet=self.isFacet,
dimension=self
)
def getSelect(self, **kwargs):
if self.fields:
if len(self.fields) == 1:
return Data(
name=self.full_name,
value=self.fields[0],
aggregate="none"
)
else:
return Data(
name=self.full_name,
value=self.fields,
aggregate="none"
)
domain = self.getDomain(**kwargs)
if not domain.getKey:
Log.error("Should not happen")
if not domain.NULL:
Log.error("Should not happen")
return Data(
name=self.full_name,
domain=domain,
aggregate="none"
)
def addParts(parentPart, childPath, count, index):
"""
BUILD A hierarchy BY REPEATEDLY CALLING self METHOD WITH VARIOUS childPaths
count IS THE NUMBER FOUND FOR self PATH
"""
if index == None:
index = 0
if index == len(childPath):
return
c = childPath[index]
parentPart.count = coalesce(parentPart.count, 0) + count
if parentPart.partitions == None:
parentPart.partitions = FlatList()
for i, part in enumerate(parentPart.partitions):
if part.name == c.name:
addParts(part, childPath, count, index + 1)
return
parentPart.partitions.append(c)
addParts(c, childPath, count, index + 1)
def parse_partition(part):
for p in part.partitions:
if part.index:
p.index = part.index # COPY INDEX DOWN
parse_partition(p)
p.value = coalesce(p.value, p.name)
p.parent = part
if not part.where:
if len(part.partitions) > 100:
Log.error("Must define an where on {{name}} there are too many partitions ({{num_parts}})",
name= part.name,
num_parts= len(part.partitions))
# DEFAULT where IS THE UNION OF ALL CHILD FILTERS
if part.partitions:
part.where = {"or": part.partitions.where}<|fim▁end|> | partitions = FlatList()
for g, p in parts.groupby(edges): |
<|file_name|>udp.rs<|end_file_name|><|fim▁begin|>use {io, Ready, Poll, PollOpt, Token};
use event::Evented;
use unix::EventedFd;
use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr};
use std::os::unix::io::{RawFd, IntoRawFd, AsRawFd, FromRawFd};
#[allow(unused_imports)] // only here for Rust 1.8
use net2::UdpSocketExt;
#[derive(Debug)]
pub struct UdpSocket {
io: net::UdpSocket,
}
impl UdpSocket {
pub fn new(socket: net::UdpSocket) -> io::Result<UdpSocket> {
socket.set_nonblocking(true)?;
Ok(UdpSocket {
io: socket,
})
}
pub fn local_addr(&self) -> io::Result<SocketAddr> {
self.io.local_addr()
}
pub fn try_clone(&self) -> io::Result<UdpSocket> {
self.io.try_clone().map(|io| {
UdpSocket {
io: io,
}
})
}
pub fn send_to(&self, buf: &[u8], target: &SocketAddr) -> io::Result<usize> {
self.io.send_to(buf, target)
}
pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
self.io.recv_from(buf)
}
pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
self.io.send(buf)
}
pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
self.io.recv(buf)
}
pub fn connect(&self, addr: SocketAddr)
-> io::Result<()> {
self.io.connect(addr)
}
pub fn broadcast(&self) -> io::Result<bool> {
self.io.broadcast()
}
pub fn set_broadcast(&self, on: bool) -> io::Result<()> {
self.io.set_broadcast(on)
}
pub fn multicast_loop_v4(&self) -> io::Result<bool> {
self.io.multicast_loop_v4()
}
pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> {
self.io.set_multicast_loop_v4(on)
}
pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
self.io.multicast_ttl_v4()
}
pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> {
self.io.set_multicast_ttl_v4(ttl)
}
pub fn multicast_loop_v6(&self) -> io::Result<bool> {
self.io.multicast_loop_v6()
}
pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> {
self.io.set_multicast_loop_v6(on)
}
pub fn ttl(&self) -> io::Result<u32> {
self.io.ttl()
}
pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
self.io.set_ttl(ttl)
}
pub fn join_multicast_v4(&self,
multiaddr: &Ipv4Addr,
interface: &Ipv4Addr) -> io::Result<()> {
self.io.join_multicast_v4(multiaddr, interface)
}
pub fn join_multicast_v6(&self,
multiaddr: &Ipv6Addr,
interface: u32) -> io::Result<()> {
self.io.join_multicast_v6(multiaddr, interface)
}
pub fn leave_multicast_v4(&self,
multiaddr: &Ipv4Addr,
interface: &Ipv4Addr) -> io::Result<()> {
self.io.leave_multicast_v4(multiaddr, interface)
}
pub fn leave_multicast_v6(&self,
multiaddr: &Ipv6Addr,
interface: u32) -> io::Result<()> {
self.io.leave_multicast_v6(multiaddr, interface)
}
pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
self.io.set_only_v6(only_v6)
}
<|fim▁hole|> self.io.only_v6()
}
pub fn take_error(&self) -> io::Result<Option<io::Error>> {
self.io.take_error()
}
}
impl Evented for UdpSocket {
fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
EventedFd(&self.as_raw_fd()).register(poll, token, interest, opts)
}
fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> {
EventedFd(&self.as_raw_fd()).reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &Poll) -> io::Result<()> {
EventedFd(&self.as_raw_fd()).deregister(poll)
}
}
impl FromRawFd for UdpSocket {
unsafe fn from_raw_fd(fd: RawFd) -> UdpSocket {
UdpSocket {
io: net::UdpSocket::from_raw_fd(fd),
}
}
}
impl IntoRawFd for UdpSocket {
fn into_raw_fd(self) -> RawFd {
self.io.into_raw_fd()
}
}
impl AsRawFd for UdpSocket {
fn as_raw_fd(&self) -> RawFd {
self.io.as_raw_fd()
}
}<|fim▁end|> | pub fn only_v6(&self) -> io::Result<bool> { |
<|file_name|>manufactureGAP_vertical.py<|end_file_name|><|fim▁begin|>import polyadcirc.run_framework.domain as dom
import polyadcirc.pyGriddata.manufacture_gap as manu
grid_dir = '.'
domain = dom.domain(grid_dir)
domain.read_spatial_grid()
x_values = [n.x for n in domain.node.values()]
y_values = [n.y for n in domain.node.values()]
xr = max(x_values)
xl = min(x_values)
yu = max(y_values)
yl = min(y_values)
x_points = (xl, 150, 750, xr)
p1 = [0, 0, 0, 1]
p2 = [0, 0, 0, 1]<|fim▁hole|>rand_rect = manu.random_vertical(x_points, yl, yu, [1, 2, 3, 4], p_sections=
[p1, p2, p3])
manu.write_gapfile(rand_rect, xl, yl, 'sections.asc')<|fim▁end|> | p3 = [.2, .3, .4, .1] |
<|file_name|>split_list__paged_rt__int__int32_t.cpp<|end_file_name|><|fim▁begin|>#include "benchmark/benchmark.h"
#include "c4/log.hpp"
#include "c4/allocator.hpp"
#include "../list_types.hpp"
namespace bm = benchmark;
namespace c4 {
template< class List >
void BM_ListPushBack(bm::State& st)
{
List li;<|fim▁hole|> {
for(int i = 0, e = st.range(0); i < e; ++i)
{
if(li.size() == li.max_size()) li.clear();
li.push_back(v);
++count;
}
li.clear();
}
st.SetComplexityN(st.range(0));
st.SetItemsProcessed(count);
st.SetBytesProcessed(count * sizeof(T));
}
BENCHMARK_TEMPLATE(BM_ListPushBack, split_list__paged_rt< int C4_COMMA int32_t >)
->RangeMultiplier(2)
->Range(4, 1<<19)
->Complexity();
} // end namespace c4
BENCHMARK_MAIN()<|fim▁end|> | using T = typename List::value_type;
T v{};
size_t count = 0;
while(st.KeepRunning()) |
<|file_name|>settings.py<|end_file_name|><|fim▁begin|># Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
DEBUG = True
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'REPLACE_ME_BEFORE_PRODUCTION'
ALLOWED_HOSTS = ['.spades.com']
AUTH_USER_MODEL = 'deck.User'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'deck',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'spades.urls'
WSGI_APPLICATION = 'spades.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'database'),
}
}
<|fim▁hole|># https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'MST'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = BASE_DIR+'/media/'
MEDIA_URL = '/media/'<|fim▁end|> | # Internationalization |
<|file_name|>infeed_ops.cc<|end_file_name|><|fim▁begin|>/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and<|fim▁hole|>#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
REGISTER_OP("InfeedDequeue")
.Output("output: dtype")
.Attr("dtype: type")
.Attr("shape: shape")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
PartialTensorShape shape;
TF_RETURN_IF_ERROR(c->GetAttr("shape", &shape));
TensorShapeProto shape_proto;
shape.AsProto(&shape_proto);
ShapeHandle out;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeProto(shape_proto, &out));
c->set_output(0, out);
return Status::OK();
})
.Doc(R"doc(
A placeholder op for a value that will be fed into the computation.
output: A tensor that will be provided using the infeed mechanism.
dtype: The type of elements in the tensor.
shape: The shape of the tensor.
)doc");
REGISTER_OP("InfeedEnqueue")
.Input("input: dtype")
.Attr("dtype: type")
.Attr("shape: shape = {}")
.Attr("device_ordinal: int = -1")
.SetIsStateful()
.Doc(R"doc(
An op which feeds a single Tensor value into the computation.
input: A tensor that will be provided using the infeed mechanism.
dtype: The type of elements in the tensor.
shape: The shape of the tensor.
device_ordinal: The TPU device to use. This should be -1 when the Op
is running on a TPU device, and >= 0 when the Op is running on the CPU
device.
)doc");
REGISTER_OP("InfeedEnqueueTuple")
.Input("inputs: dtypes")
.Attr("dtypes: list(type)")
.Attr("shapes: list(shape)")
.Attr("device_ordinal: int = -1")
.SetIsStateful()
.Doc(R"doc(
An op which feeds multiple Tensor values into the computation as an XLA tuple.
inputs: A list of tensors that will be provided using the infeed mechanism.
dtypes: The element types of each element in `inputs`.
shapes: The shapes of each tensor in `inputs`.
device_ordinal: The TPU device to use. This should be -1 when the Op
is running on a TPU device, and >= 0 when the Op is running on the CPU
device.
)doc");
REGISTER_OP("InfeedDequeueTuple")
.Output("outputs: dtypes")
.Attr("dtypes: list(type)")
.Attr("shapes: list(shape)")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
std::vector<PartialTensorShape> shapes;
TF_RETURN_IF_ERROR(c->GetAttr("shapes", &shapes));
for (int i = 0; i < shapes.size(); ++i) {
TensorShapeProto shape_proto;
shapes[i].AsProto(&shape_proto);
ShapeHandle out;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeProto(shape_proto, &out));
c->set_output(i, out);
}
return Status::OK();
})
.Doc(R"doc(
A placeholder op for multiple values that will be fed into the computation
simultaneously as an XLA tuple.
outputs: A list of tensors that will be provided using the infeed mechanism.
dtypes: The element types of each element in `outputs`.
shapes: The shapes of each tensor in `outputs`.
)doc");
} // namespace tensorflow<|fim▁end|> | limitations under the License.
==============================================================================*/
|
<|file_name|>coherence-pair-covered-uncovered-1.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that the same coverage rules apply even if the local type appears in the
// list of type parameters, not the self type.
// aux-build:coherence-lib.rs
<|fim▁hole|>
impl<T, U> Remote1<Pair<T, Local<U>>> for i32 { }
//~^ ERROR type parameter `T` must be used as the type parameter for some local type
fn main() { }<|fim▁end|> | extern crate "coherence-lib" as lib;
use lib::{Remote1, Pair};
pub struct Local<T>(T); |
<|file_name|>tests.rs<|end_file_name|><|fim▁begin|>use crate::direct_transport::{DirectTransport, DirectTransportOptions};
use crate::router::{Router, RouterOptions};
use crate::transport::Transport;
use crate::worker::WorkerSettings;
use crate::worker_manager::WorkerManager;
use futures_lite::future;
use std::env;
async fn init() -> (Router, DirectTransport) {
{
let mut builder = env_logger::builder();
if env::var(env_logger::DEFAULT_FILTER_ENV).is_err() {
builder.filter_level(log::LevelFilter::Off);
}
let _ = builder.is_test(true).try_init();
}<|fim▁hole|>
let worker = worker_manager
.create_worker(WorkerSettings::default())
.await
.expect("Failed to create worker");
let router = worker
.create_router(RouterOptions::default())
.await
.expect("Failed to create router");
let transport = router
.create_direct_transport(DirectTransportOptions::default())
.await
.expect("Failed to create transport1");
(router, transport)
}
#[test]
fn router_close_event() {
future::block_on(async move {
let (router, transport) = init().await;
let (mut close_tx, close_rx) = async_oneshot::oneshot::<()>();
let _handler = transport.on_close(Box::new(move || {
let _ = close_tx.send(());
}));
let (mut router_close_tx, router_close_rx) = async_oneshot::oneshot::<()>();
let _handler = transport.on_router_close(Box::new(move || {
let _ = router_close_tx.send(());
}));
router.close();
router_close_rx
.await
.expect("Failed to receive router_close event");
close_rx.await.expect("Failed to receive close event");
assert_eq!(transport.closed(), true);
});
}<|fim▁end|> |
let worker_manager = WorkerManager::new(); |
<|file_name|>AtomicMarkableReference.java<|end_file_name|><|fim▁begin|>/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* This file is available under and governed by the GNU General Public
* License version 2 only, as published by the Free Software Foundation.
* However, the following notice accompanied the original version of this
* file:
*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package java.util.concurrent.atomic;
/**
* An {@code AtomicMarkableReference} maintains an object reference
* along with a mark bit, that can be updated atomically.
*
* <p>Implementation note: This implementation maintains markable
* references by creating internal objects representing "boxed"
* [reference, boolean] pairs.
*
* @since 1.5
* @author Doug Lea
* @param <V> The type of object referred to by this reference
*/
public class AtomicMarkableReference<V> {
private static class Pair<T> {
final T reference;
final boolean mark;
private Pair(T reference, boolean mark) {
this.reference = reference;
this.mark = mark;
}
static <T> Pair<T> of(T reference, boolean mark) {
return new Pair<T>(reference, mark);
}
}
private volatile Pair<V> pair;
/**
* Creates a new {@code AtomicMarkableReference} with the given
* initial values.
*
* @param initialRef the initial reference
* @param initialMark the initial mark
*/
public AtomicMarkableReference(V initialRef, boolean initialMark) {
pair = Pair.of(initialRef, initialMark);
}
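    // Illustrative usage sketch (not part of the original JDK source; `Node`, `succ`
    // and `replacement` are hypothetical names). It shows the two common operations
    // on a [reference, mark] pair, e.g. tagging a list node as logically deleted:
    //
    //   AtomicMarkableReference<Node> next =
    //       new AtomicMarkableReference<Node>(succ, false);
    //   boolean marked  = next.attemptMark(succ, true);                       // set only the mark
    //   boolean swapped = next.compareAndSet(succ, replacement, true, false); // swap both atomically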
/**
* Returns the current value of the reference.
*
* @return the current value of the reference
*/
public V getReference() {
return pair.reference;
}
/**
* Returns the current value of the mark.
*
* @return the current value of the mark
*/
public boolean isMarked() {
return pair.mark;
}
/**
* Returns the current values of both the reference and the mark.
* Typical usage is {@code boolean[1] holder; ref = v.get(holder); }.
*
* @param markHolder an array of size of at least one. On return,
     * {@code markHolder[0]} will hold the value of the mark.
* @return the current value of the reference
*/
public V get(boolean[] markHolder) {
Pair<V> pair = this.pair;
markHolder[0] = pair.mark;
return pair.reference;
}
/**
* Atomically sets the value of both the reference and mark
* to the given update values if the
* current reference is {@code ==} to the expected reference
* and the current mark is equal to the expected mark.
*
* <p><a href="package-summary.html#weakCompareAndSet">May fail
* spuriously and does not provide ordering guarantees</a>, so is
* only rarely an appropriate alternative to {@code compareAndSet}.
*
* @param expectedReference the expected value of the reference
* @param newReference the new value for the reference
* @param expectedMark the expected value of the mark
* @param newMark the new value for the mark
* @return {@code true} if successful
*/
public boolean weakCompareAndSet(V expectedReference,
V newReference,
boolean expectedMark,
boolean newMark) {
return compareAndSet(expectedReference, newReference,
expectedMark, newMark);
}
/**
* Atomically sets the value of both the reference and mark
* to the given update values if the
* current reference is {@code ==} to the expected reference
* and the current mark is equal to the expected mark.
*
* @param expectedReference the expected value of the reference
* @param newReference the new value for the reference
* @param expectedMark the expected value of the mark
* @param newMark the new value for the mark
* @return {@code true} if successful
*/
public boolean compareAndSet(V expectedReference,
V newReference,
boolean expectedMark,
boolean newMark) {
Pair<V> current = pair;
return
expectedReference == current.reference &&
expectedMark == current.mark &&
((newReference == current.reference &&
newMark == current.mark) ||
casPair(current, Pair.of(newReference, newMark)));
}
/**
* Unconditionally sets the value of both the reference and mark.
*
* @param newReference the new value for the reference
* @param newMark the new value for the mark
*/
public void set(V newReference, boolean newMark) {
Pair<V> current = pair;
if (newReference != current.reference || newMark != current.mark)
this.pair = Pair.of(newReference, newMark);
}
/**
* Atomically sets the value of the mark to the given update value
* if the current reference is {@code ==} to the expected<|fim▁hole|> * succeed.
*
* @param expectedReference the expected value of the reference
* @param newMark the new value for the mark
* @return {@code true} if successful
*/
public boolean attemptMark(V expectedReference, boolean newMark) {
Pair<V> current = pair;
return
expectedReference == current.reference &&
(newMark == current.mark ||
casPair(current, Pair.of(expectedReference, newMark)));
}
// Unsafe mechanics
private static final sun.misc.Unsafe UNSAFE = sun.misc.Unsafe.getUnsafe();
// private static final long pairOffset =
// objectFieldOffset(UNSAFE, "pair", AtomicMarkableReference.class);
private boolean casPair(Pair<V> cmp, Pair<V> val) {
return UNSAFE.compareAndSwapObject(this, pairOffset, cmp, val);
}
public static volatile long pairOffset;
static {
try {
pairOffset = 0;
UNSAFE.registerStaticFieldOffset(
AtomicMarkableReference.class.getDeclaredField("pairOffset"),
AtomicMarkableReference.class.getDeclaredField("pair"));
} catch (Exception ex) { throw new Error(ex); }
}
}<|fim▁end|> | * reference. Any given invocation of this operation may fail
* (return {@code false}) spuriously, but repeated invocation
* when the current value holds the expected value and no other
* thread is also attempting to set the value will eventually |
<|file_name|>LocalAuthSupplier.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2018 OPS4J Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.ops4j.kaiserkai.rest;
import com.spotify.docker.client.auth.RegistryAuthSupplier;
import com.spotify.docker.client.exceptions.DockerException;
import com.spotify.docker.client.messages.RegistryAuth;
import com.spotify.docker.client.messages.RegistryConfigs;
/**
* @author Harald Wellmann
*
*/
public class LocalAuthSupplier implements RegistryAuthSupplier {
@Override
public RegistryAuth authFor(String imageName) throws DockerException {
if (imageName.startsWith("127.0.0.1")) {
RegistryAuth auth = RegistryAuth.builder().username("admin").password("admin").build();
return auth;
}<|fim▁hole|>
@Override
public RegistryAuth authForSwarm() throws DockerException {
return null;
}
@Override
public RegistryConfigs authForBuild() throws DockerException {
return null;
}
}<|fim▁end|> | return null;
} |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>"""A guestbook sample with sqlite3."""
import logging
import os
import jinja2
import sqlite3
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import modules
from google.appengine.api import runtime
from google.appengine.api import users
from google.appengine.ext import ndb
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'])
DB_FILENAME = os.path.join('/tmp', 'guestbook.sqlite')
CREATE_TABLE_SQL = """\
CREATE TABLE IF NOT EXISTS guestbook
(id INTEGER PRIMARY KEY AUTOINCREMENT, name VARCHAR, content VARCHAR)"""
SELECT_SQL = 'SELECT * FROM guestbook ORDER BY id DESC LIMIT {}'
INSERT_SQL = 'INSERT INTO guestbook (name, content) VALUES (?, ?)'
POST_PER_PAGE = 20
def shutdown_hook():
"""A hook function for de-registering myself."""
logging.info('shutdown_hook called.')
instance_id = modules.get_current_instance_id()
ndb.transaction(
lambda: ActiveServer.get_instance_key(instance_id).delete())
def get_connection():
"""A function to get sqlite connection.
Returns:
An sqlite connection object.
"""
logging.info('Opening a sqlite db.')
return sqlite3.connect(DB_FILENAME)
def get_url_for_instance(instance_id):
"""Return a full url of the guestbook running on a particular instance.
Args:
        A string to represent a VM instance.
Returns:
URL string for the guestbook form on the instance.
"""
hostname = app_identity.get_default_version_hostname()
return 'https://{}-dot-{}-dot-{}/guestbook'.format(
instance_id, modules.get_current_version_name(), hostname)
def get_signin_navigation(original_url):
"""Return a pair of a link text and a link for sign in/out operation.
Args:
An original URL.
Returns:
Two value tuple; a url and a link text.
"""
if users.get_current_user():
url = users.create_logout_url(original_url)
url_linktext = 'Logout'
else:
url = users.create_login_url(original_url)
url_linktext = 'Login'
return url, url_linktext
class ActiveServer(ndb.Model):
"""A model to store active servers.
We use the instance id as the key name, and there are no properties.
"""
@classmethod
def get_instance_key(cls, instance_id):
"""Return a key for the given instance_id.
Args:
An instance id for the server.
Returns:
A Key object which has a common parent key with the name 'Root'.
"""
return ndb.Key(cls, 'Root', cls, instance_id)
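        # Illustrative note (not in the original sample): for a hypothetical
        # instance id 'abc123' this evaluates to
        #     ndb.Key('ActiveServer', 'Root', 'ActiveServer', 'abc123')
        # i.e. every active server hangs off the shared 'Root' parent, which is
        # what lets ListServers below run a single ancestor query over them.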
class ListServers(webapp2.RequestHandler):
"""A handler for listing active servers."""
def get(self):
"""A get handler for listing active servers."""
key = ndb.Key(ActiveServer, 'Root')
query = ActiveServer.query(ancestor=key)
servers = []
for key in query.iter(keys_only=True):
instance_id = key.string_id()
servers.append((instance_id, get_url_for_instance(instance_id)))
template = JINJA_ENVIRONMENT.get_template('index.html')
url, url_linktext = get_signin_navigation(self.request.uri)
self.response.out.write(template.render(servers=servers,
url=url,
url_linktext=url_linktext))
class MainPage(webapp2.RequestHandler):
"""A handler for showing the guestbook form."""
def get(self):
"""Guestbook main page."""
con = get_connection()
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute(SELECT_SQL.format(POST_PER_PAGE))
greetings = cur.fetchall()
con.close()
template = JINJA_ENVIRONMENT.get_template('guestbook.html')
url, url_linktext = get_signin_navigation(self.request.uri)
self.response.write(template.render(greetings=greetings,
url=url,
url_linktext=url_linktext))
class Guestbook(webapp2.RequestHandler):
"""A handler for storing a message."""
def post(self):
"""A handler for storing a message."""
author = ''
if users.get_current_user():
author = users.get_current_user().nickname()
con = get_connection()
with con:
con.execute(INSERT_SQL, (author, self.request.get('content')))
self.redirect('/guestbook')
class Start(webapp2.RequestHandler):
"""A handler for /_ah/start."""
def get(self):
"""A handler for /_ah/start, registering myself."""
runtime.set_shutdown_hook(shutdown_hook)
con = get_connection()
with con:
con.execute(CREATE_TABLE_SQL)
instance_id = modules.get_current_instance_id()
server = ActiveServer(key=ActiveServer.get_instance_key(instance_id))
server.put()
class Stop(webapp2.RequestHandler):
"""A handler for /_ah/stop."""
def get(self):<|fim▁hole|> With the initial version of the VM Runtime, a call to
/_ah/stop hits this handler, without invoking the shutdown
hook we registered in the start handler. We're working on the
fix to make it a consistent behavior same as the traditional
App Engine backends. After the fix is out, this stop handler
won't be necessary any more.
"""
shutdown_hook()
APPLICATION = webapp2.WSGIApplication([
('/', ListServers),
('/guestbook', MainPage),
('/sign', Guestbook),
('/_ah/start', Start),
('/_ah/stop', Stop),
], debug=True)<|fim▁end|> | """Just call shutdown_hook now for a temporary workaround.
|
<|file_name|>iterate-over-array.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or<|fim▁hole|>// except according to those terms.
#[no_mangle]
fn test(x: &[int]) -> int {
let mut y = 0;
let mut i = 0;
while (i < x.len()) {
y += x[i];
i += 1;
}
y
}<|fim▁end|> | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed |
<|file_name|>index.js<|end_file_name|><|fim▁begin|><|fim▁hole|>exports.forward = require('./forward');
exports.respond = require('./respond');<|fim▁end|> | |
<|file_name|>constant.rs<|end_file_name|><|fim▁begin|>//! Handling of `static`s, `const`s and promoted allocations
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_errors::ErrorReported;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::interpret::{
read_target_uint, AllocId, Allocation, ConstValue, ErrorHandled, GlobalAlloc, Scalar,
};
use rustc_middle::ty::ConstKind;
use rustc_span::DUMMY_SP;
use cranelift_codegen::ir::GlobalValueData;
use cranelift_module::*;
use crate::prelude::*;
pub(crate) struct ConstantCx {
todo: Vec<TodoItem>,
done: FxHashSet<DataId>,
anon_allocs: FxHashMap<AllocId, DataId>,
}
#[derive(Copy, Clone, Debug)]
enum TodoItem {
Alloc(AllocId),
Static(DefId),
}
impl ConstantCx {
pub(crate) fn new() -> Self {
ConstantCx { todo: vec![], done: FxHashSet::default(), anon_allocs: FxHashMap::default() }
}
pub(crate) fn finalize(mut self, tcx: TyCtxt<'_>, module: &mut dyn Module) {
//println!("todo {:?}", self.todo);
define_all_allocs(tcx, module, &mut self);
//println!("done {:?}", self.done);
self.done.clear();
}
}
pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, '_>) -> bool {
let mut all_constants_ok = true;
for constant in &fx.mir.required_consts {
let const_ = match fx.monomorphize(constant.literal) {
ConstantKind::Ty(ct) => ct,
ConstantKind::Val(..) => continue,
};
match const_.val {
ConstKind::Value(_) => {}
ConstKind::Unevaluated(unevaluated) => {
if let Err(err) =
fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), unevaluated, None)
{
all_constants_ok = false;
match err {
ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {
fx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
}
ErrorHandled::TooGeneric => {
span_bug!(
constant.span,
"codgen encountered polymorphic constant: {:?}",
err
);
}
}
}
}
ConstKind::Param(_)
| ConstKind::Infer(_)
| ConstKind::Bound(_, _)
| ConstKind::Placeholder(_)
| ConstKind::Error(_) => unreachable!("{:?}", const_),
}
}
all_constants_ok
}
pub(crate) fn codegen_static(tcx: TyCtxt<'_>, module: &mut dyn Module, def_id: DefId) {
let mut constants_cx = ConstantCx::new();
constants_cx.todo.push(TodoItem::Static(def_id));
constants_cx.finalize(tcx, module);
}
pub(crate) fn codegen_tls_ref<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
def_id: DefId,
layout: TyAndLayout<'tcx>,
) -> CValue<'tcx> {
let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
if fx.clif_comments.enabled() {
fx.add_comment(local_data_id, format!("tls {:?}", def_id));
}
let tls_ptr = fx.bcx.ins().tls_value(fx.pointer_type, local_data_id);
CValue::by_val(tls_ptr, layout)
}
fn codegen_static_ref<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
def_id: DefId,
layout: TyAndLayout<'tcx>,
) -> CPlace<'tcx> {
let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
if fx.clif_comments.enabled() {
fx.add_comment(local_data_id, format!("{:?}", def_id));
}
let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
assert!(!layout.is_unsized(), "unsized statics aren't supported");
assert!(
matches!(
fx.bcx.func.global_values[local_data_id],
GlobalValueData::Symbol { tls: false, .. }
),
"tls static referenced without Rvalue::ThreadLocalRef"
);
CPlace::for_ptr(crate::pointer::Pointer::new(global_ptr), layout)
}
pub(crate) fn codegen_constant<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
constant: &Constant<'tcx>,
) -> CValue<'tcx> {
let const_ = match fx.monomorphize(constant.literal) {
ConstantKind::Ty(ct) => ct,
ConstantKind::Val(val, ty) => return codegen_const_value(fx, val, ty),
};
let const_val = match const_.val {
ConstKind::Value(const_val) => const_val,
ConstKind::Unevaluated(uv)
if fx.tcx.is_static(uv.def.did) =>
{
assert!(uv.substs(fx.tcx).is_empty());
assert!(uv.promoted.is_none());
return codegen_static_ref(fx, uv.def.did, fx.layout_of(const_.ty)).to_cvalue(fx);
}
ConstKind::Unevaluated(unevaluated) => {
match fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), unevaluated, None) {
Ok(const_val) => const_val,
Err(_) => {
span_bug!(constant.span, "erroneous constant not captured by required_consts");
}
}
}
ConstKind::Param(_)
| ConstKind::Infer(_)
| ConstKind::Bound(_, _)
| ConstKind::Placeholder(_)
| ConstKind::Error(_) => unreachable!("{:?}", const_),
};
codegen_const_value(fx, const_val, const_.ty)
}
pub(crate) fn codegen_const_value<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
const_val: ConstValue<'tcx>,
ty: Ty<'tcx>,
) -> CValue<'tcx> {
let layout = fx.layout_of(ty);
assert!(!layout.is_unsized(), "sized const value");
if layout.is_zst() {
return CValue::by_ref(crate::Pointer::dangling(layout.align.pref), layout);
}
match const_val {
ConstValue::Scalar(x) => match x {
Scalar::Int(int) => {
if fx.clif_type(layout.ty).is_some() {
return CValue::const_val(fx, layout, int);
} else {
let raw_val = int.to_bits(int.size()).unwrap();
let val = match int.size().bytes() {
1 => fx.bcx.ins().iconst(types::I8, raw_val as i64),
2 => fx.bcx.ins().iconst(types::I16, raw_val as i64),
4 => fx.bcx.ins().iconst(types::I32, raw_val as i64),
8 => fx.bcx.ins().iconst(types::I64, raw_val as i64),
16 => {
let lsb = fx.bcx.ins().iconst(types::I64, raw_val as u64 as i64);
let msb =
fx.bcx.ins().iconst(types::I64, (raw_val >> 64) as u64 as i64);
fx.bcx.ins().iconcat(lsb, msb)
}
_ => unreachable!(),
};
let place = CPlace::new_stack_slot(fx, layout);
place.to_ptr().store(fx, val, MemFlags::trusted());
place.to_cvalue(fx)
}
}
Scalar::Ptr(ptr, _size) => {
let (alloc_id, offset) = ptr.into_parts(); // we know the `offset` is relative
let alloc_kind = fx.tcx.get_global_alloc(alloc_id);
let base_addr = match alloc_kind {
Some(GlobalAlloc::Memory(alloc)) => {
let data_id = data_id_for_alloc_id(
&mut fx.constants_cx,
fx.module,
alloc_id,
alloc.mutability,
);
let local_data_id =
fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
if fx.clif_comments.enabled() {
fx.add_comment(local_data_id, format!("{:?}", alloc_id));
}
fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
}
Some(GlobalAlloc::Function(instance)) => {
let func_id = crate::abi::import_function(fx.tcx, fx.module, instance);
let local_func_id =
fx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
fx.bcx.ins().func_addr(fx.pointer_type, local_func_id)
}
Some(GlobalAlloc::Static(def_id)) => {
assert!(fx.tcx.is_static(def_id));
let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
let local_data_id =
fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
if fx.clif_comments.enabled() {
fx.add_comment(local_data_id, format!("{:?}", def_id));
}
fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
}
None => bug!("missing allocation {:?}", alloc_id),
};
let val = if offset.bytes() != 0 {
fx.bcx.ins().iadd_imm(base_addr, i64::try_from(offset.bytes()).unwrap())
} else {
base_addr
};
CValue::by_val(val, layout)
}
},
ConstValue::ByRef { alloc, offset } => CValue::by_ref(
pointer_for_allocation(fx, alloc)
.offset_i64(fx, i64::try_from(offset.bytes()).unwrap()),
layout,
),
ConstValue::Slice { data, start, end } => {
let ptr = pointer_for_allocation(fx, data)
.offset_i64(fx, i64::try_from(start).unwrap())
.get_addr(fx);
let len = fx
.bcx
.ins()
.iconst(fx.pointer_type, i64::try_from(end.checked_sub(start).unwrap()).unwrap());
CValue::by_val_pair(ptr, len, layout)
}
}
}
pub(crate) fn pointer_for_allocation<'tcx>(<|fim▁hole|> alloc: &'tcx Allocation,
) -> crate::pointer::Pointer {
let alloc_id = fx.tcx.create_memory_alloc(alloc);
let data_id =
data_id_for_alloc_id(&mut fx.constants_cx, &mut *fx.module, alloc_id, alloc.mutability);
let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
if fx.clif_comments.enabled() {
fx.add_comment(local_data_id, format!("{:?}", alloc_id));
}
let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
crate::pointer::Pointer::new(global_ptr)
}
pub(crate) fn data_id_for_alloc_id(
cx: &mut ConstantCx,
module: &mut dyn Module,
alloc_id: AllocId,
mutability: rustc_hir::Mutability,
) -> DataId {
cx.todo.push(TodoItem::Alloc(alloc_id));
*cx.anon_allocs.entry(alloc_id).or_insert_with(|| {
module.declare_anonymous_data(mutability == rustc_hir::Mutability::Mut, false).unwrap()
})
}
fn data_id_for_static(
tcx: TyCtxt<'_>,
module: &mut dyn Module,
def_id: DefId,
definition: bool,
) -> DataId {
let rlinkage = tcx.codegen_fn_attrs(def_id).linkage;
let linkage = if definition {
crate::linkage::get_static_linkage(tcx, def_id)
} else if rlinkage == Some(rustc_middle::mir::mono::Linkage::ExternalWeak)
|| rlinkage == Some(rustc_middle::mir::mono::Linkage::WeakAny)
{
Linkage::Preemptible
} else {
Linkage::Import
};
let instance = Instance::mono(tcx, def_id).polymorphize(tcx);
let symbol_name = tcx.symbol_name(instance).name;
let ty = instance.ty(tcx, ParamEnv::reveal_all());
let is_mutable = if tcx.is_mutable_static(def_id) {
true
} else {
!ty.is_freeze(tcx.at(DUMMY_SP), ParamEnv::reveal_all())
};
let align = tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().align.pref.bytes();
let attrs = tcx.codegen_fn_attrs(def_id);
let data_id = module
.declare_data(
&*symbol_name,
linkage,
is_mutable,
attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL),
)
.unwrap();
if rlinkage.is_some() {
// Comment copied from https://github.com/rust-lang/rust/blob/45060c2a66dfd667f88bd8b94261b28a58d85bd5/src/librustc_codegen_llvm/consts.rs#L141
// Declare an internal global `extern_with_linkage_foo` which
// is initialized with the address of `foo`. If `foo` is
// discarded during linking (for example, if `foo` has weak
// linkage and there are no definitions), then
// `extern_with_linkage_foo` will instead be initialized to
// zero.
let ref_name = format!("_rust_extern_with_linkage_{}", symbol_name);
let ref_data_id = module.declare_data(&ref_name, Linkage::Local, false, false).unwrap();
let mut data_ctx = DataContext::new();
data_ctx.set_align(align);
let data = module.declare_data_in_data(data_id, &mut data_ctx);
data_ctx.define(std::iter::repeat(0).take(pointer_ty(tcx).bytes() as usize).collect());
data_ctx.write_data_addr(0, data, 0);
match module.define_data(ref_data_id, &data_ctx) {
// Every time the static is referenced there will be another definition of this global,
// so duplicate definitions are expected and allowed.
Err(ModuleError::DuplicateDefinition(_)) => {}
res => res.unwrap(),
}
ref_data_id
} else {
data_id
}
}
fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut ConstantCx) {
while let Some(todo_item) = cx.todo.pop() {
let (data_id, alloc, section_name) = match todo_item {
TodoItem::Alloc(alloc_id) => {
//println!("alloc_id {}", alloc_id);
let alloc = match tcx.get_global_alloc(alloc_id).unwrap() {
GlobalAlloc::Memory(alloc) => alloc,
GlobalAlloc::Function(_) | GlobalAlloc::Static(_) => unreachable!(),
};
let data_id = *cx.anon_allocs.entry(alloc_id).or_insert_with(|| {
module
.declare_anonymous_data(
alloc.mutability == rustc_hir::Mutability::Mut,
false,
)
.unwrap()
});
(data_id, alloc, None)
}
TodoItem::Static(def_id) => {
//println!("static {:?}", def_id);
let section_name = tcx.codegen_fn_attrs(def_id).link_section.map(|s| s.as_str());
let alloc = tcx.eval_static_initializer(def_id).unwrap();
let data_id = data_id_for_static(tcx, module, def_id, true);
(data_id, alloc, section_name)
}
};
//("data_id {}", data_id);
if cx.done.contains(&data_id) {
continue;
}
let mut data_ctx = DataContext::new();
data_ctx.set_align(alloc.align.bytes());
if let Some(section_name) = section_name {
let (segment_name, section_name) = if tcx.sess.target.is_like_osx {
if let Some(names) = section_name.split_once(',') {
names
} else {
tcx.sess.fatal(&format!(
"#[link_section = \"{}\"] is not valid for macos target: must be segment and section separated by comma",
section_name
));
}
} else {
("", &*section_name)
};
data_ctx.set_segment_section(segment_name, section_name);
}
let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()).to_vec();
data_ctx.define(bytes.into_boxed_slice());
for &(offset, alloc_id) in alloc.relocations().iter() {
let addend = {
let endianness = tcx.data_layout.endian;
let offset = offset.bytes() as usize;
let ptr_size = tcx.data_layout.pointer_size;
let bytes = &alloc.inspect_with_uninit_and_ptr_outside_interpreter(
offset..offset + ptr_size.bytes() as usize,
);
read_target_uint(endianness, bytes).unwrap()
};
let reloc_target_alloc = tcx.get_global_alloc(alloc_id).unwrap();
let data_id = match reloc_target_alloc {
GlobalAlloc::Function(instance) => {
assert_eq!(addend, 0);
let func_id = crate::abi::import_function(tcx, module, instance);
let local_func_id = module.declare_func_in_data(func_id, &mut data_ctx);
data_ctx.write_function_addr(offset.bytes() as u32, local_func_id);
continue;
}
GlobalAlloc::Memory(target_alloc) => {
data_id_for_alloc_id(cx, module, alloc_id, target_alloc.mutability)
}
GlobalAlloc::Static(def_id) => {
if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
{
tcx.sess.fatal(&format!(
"Allocation {:?} contains reference to TLS value {:?}",
alloc, def_id
));
}
// Don't push a `TodoItem::Static` here, as it will cause statics used by
// multiple crates to be duplicated between them. It isn't necessary anyway,
// as it will get pushed by `codegen_static` when necessary.
data_id_for_static(tcx, module, def_id, false)
}
};
let global_value = module.declare_data_in_data(data_id, &mut data_ctx);
data_ctx.write_data_addr(offset.bytes() as u32, global_value, addend as i64);
}
module.define_data(data_id, &data_ctx).unwrap();
cx.done.insert(data_id);
}
assert!(cx.todo.is_empty(), "{:?}", cx.todo);
}
pub(crate) fn mir_operand_get_const_val<'tcx>(
fx: &FunctionCx<'_, '_, 'tcx>,
operand: &Operand<'tcx>,
) -> Option<ConstValue<'tcx>> {
match operand {
Operand::Constant(const_) => match const_.literal {
ConstantKind::Ty(const_) => {
fx.monomorphize(const_).eval(fx.tcx, ParamEnv::reveal_all()).val.try_to_value()
}
ConstantKind::Val(val, _) => Some(val),
},
// FIXME(rust-lang/rust#85105): Casts like `IMM8 as u32` result in the const being stored
// inside a temporary before being passed to the intrinsic requiring the const argument.
// This code tries to find a single constant defining definition of the referenced local.
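        // Illustrative sketch of the MIR shape this walk accepts (hypothetical
        // locals, not taken from the original sources):
        //
        //     _2 = const 7_i32;            // Rvalue::Use of a constant
        //     _3 = _2 as u32 (Misc);       // same-size integer cast of that definition
        //     ... intrinsic(..., move _3, ...)
        //
        // A local qualifies only if it has exactly one such defining assignment and is
        // never written by SetDiscriminant, inline asm, or used as a call destination.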
Operand::Copy(place) | Operand::Move(place) => {
if !place.projection.is_empty() {
return None;
}
let mut computed_const_val = None;
for bb_data in fx.mir.basic_blocks() {
for stmt in &bb_data.statements {
match &stmt.kind {
StatementKind::Assign(local_and_rvalue) if &local_and_rvalue.0 == place => {
match &local_and_rvalue.1 {
Rvalue::Cast(CastKind::Misc, operand, ty) => {
if computed_const_val.is_some() {
return None; // local assigned twice
}
if !matches!(ty.kind(), ty::Uint(_) | ty::Int(_)) {
return None;
}
let const_val = mir_operand_get_const_val(fx, operand)?;
if fx.layout_of(ty).size
!= const_val.try_to_scalar_int()?.size()
{
return None;
}
computed_const_val = Some(const_val);
}
Rvalue::Use(operand) => {
computed_const_val = mir_operand_get_const_val(fx, operand)
}
_ => return None,
}
}
StatementKind::SetDiscriminant { place: stmt_place, variant_index: _ }
if &**stmt_place == place =>
{
return None;
}
StatementKind::LlvmInlineAsm(_) | StatementKind::CopyNonOverlapping(_) => {
return None;
} // conservative handling
StatementKind::Assign(_)
| StatementKind::FakeRead(_)
| StatementKind::SetDiscriminant { .. }
| StatementKind::StorageLive(_)
| StatementKind::StorageDead(_)
| StatementKind::Retag(_, _)
| StatementKind::AscribeUserType(_, _)
| StatementKind::Coverage(_)
| StatementKind::Nop => {}
}
}
match &bb_data.terminator().kind {
TerminatorKind::Goto { .. }
| TerminatorKind::SwitchInt { .. }
| TerminatorKind::Resume
| TerminatorKind::Abort
| TerminatorKind::Return
| TerminatorKind::Unreachable
| TerminatorKind::Drop { .. }
| TerminatorKind::Assert { .. } => {}
TerminatorKind::DropAndReplace { .. }
| TerminatorKind::Yield { .. }
| TerminatorKind::GeneratorDrop
| TerminatorKind::FalseEdge { .. }
| TerminatorKind::FalseUnwind { .. } => unreachable!(),
TerminatorKind::InlineAsm { .. } => return None,
TerminatorKind::Call { destination: Some((call_place, _)), .. }
if call_place == place =>
{
return None;
}
TerminatorKind::Call { .. } => {}
}
}
computed_const_val
}
}
}<|fim▁end|> | fx: &mut FunctionCx<'_, '_, 'tcx>, |
<|file_name|>search.service.spec.ts<|end_file_name|><|fim▁begin|>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {TestBed, fakeAsync, tick} from '@angular/core/testing';
import {SearchService} from './search.service';
import {COMService} from '../com/com.service';
import {ApiRepositories} from './interfaces';
import {of, throwError, empty} from 'rxjs';
describe('SearchService', () => {
let service: SearchService;
// Mock sub-services
const repositories: ApiRepositories = {
hasnext: false,
hasprev: false,
repos: [
{name: 'repo1', organization: ''},
{name: 'repo2', organization: ''},
],
};
const mockCOMService = {
fetchRepositories: () => of(repositories),
};
beforeEach(() => {
TestBed.configureTestingModule({
providers: [{provide: COMService, useValue: mockCOMService}],
});
service = TestBed.inject(SearchService);
mockCOMService.fetchRepositories = () => of(repositories);
});
it('should be created', () => {
expect(service).toBeTruthy();
});
describe('quickSearch', () => {
it('should return the fetched repositories', done => {
const targetRepo = 'repo',
targetOrg = 'org';
service.quickSearch(targetRepo, targetOrg).subscribe(repos => {
expect(repos.length).toEqual(repositories.repos.length);
repos.forEach((repo, index) => {
expect(repo.name).toEqual(repositories.repos[index].name);
});
done();
});
});
it('should return an empty response if an error occurs', done => {
// prepare the error
mockCOMService.fetchRepositories = () => throwError('');
// query a normal repo<|fim▁hole|> done();
});
});
});
describe('search', () => {
it('should return the fetched repositories', done => {
service.search('repo', 'org', []).subscribe(reponse => {
expect(reponse.repos.length).toEqual(repositories.repos.length);
reponse.repos.forEach((repo, index) => {
expect(repo.name).toEqual(repositories.repos[index].name);
});
done();
});
});
it("should add the 'order by priority' filter to the search if it isn't already present", fakeAsync(() => {
const reposFetcher = spyOn(
mockCOMService,
'fetchRepositories'
).and.returnValue(empty());
service.search('repo', 'org', []).subscribe();
tick();
expect(reposFetcher).toHaveBeenCalledTimes(1);
const reposFetcherArgs = reposFetcher.calls.mostRecent().args as (
| object
| string
)[];
expect(reposFetcherArgs[0]).toEqual('repo');
expect(reposFetcherArgs[1]).toEqual('org');
expect(reposFetcherArgs[2]).toEqual([
{name: 'orderby', value: 'priority'},
]);
}));
it("should not add the 'order by priority' filter to the search if it is already present", fakeAsync(() => {
const reposFetcher = spyOn(
mockCOMService,
'fetchRepositories'
).and.returnValue(empty());
const filters = [{name: 'orderby', value: 'name'}];
service.search('repo', 'org', filters).subscribe();
tick();
expect(reposFetcher).toHaveBeenCalledTimes(1);
const reposFetcherArgs = reposFetcher.calls.mostRecent().args as (
| object
| string
)[];
expect(reposFetcherArgs[0]).toEqual('repo');
expect(reposFetcherArgs[1]).toEqual('org');
expect(reposFetcherArgs[2]).toEqual(filters);
}));
});
});<|fim▁end|> | const targetRepo = 'repo',
targetOrg = 'org';
service.quickSearch(targetRepo, targetOrg).subscribe(repos => {
expect(repos.length).toEqual(0); // empty array |
<|file_name|>before_vm_migrate_destination.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
import os
import sys
import grp
import pwd
import traceback
import utils
import hooking
DEV_MAPPER_PATH = "/dev/mapper"
DEV_DIRECTLUN_PATH = '/dev/directlun'
def createdirectory(dirpath):
# we don't use os.mkdir/chown because we need sudo
command = ['/bin/mkdir', '-p', dirpath]
retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
if retcode != 0:
sys.stderr.write('directlun: error mkdir %s, err = %s\n' % (dirpath, err))
sys.exit(2)
mode = '755'
    command = ['/bin/chmod', mode, dirpath]
    retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
    if retcode != 0:
sys.stderr.write('directlun: error chmod %s %s, err = %s\n' % (dirpath, mode, err))
sys.exit(2)
def cloneDeviceNode(srcpath, devpath):
"""Clone a device node into a temporary private location."""
# we don't use os.remove/mknod/chmod/chown because we need sudo
command = ['/bin/rm', '-f', devpath]
retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
if retcode != 0:
sys.stderr.write('directlun: error rm -f %s, err = %s\n' % (devpath, err))
sys.exit(2)
stat = os.stat(srcpath)
major = os.major(stat.st_rdev)
minor = os.minor(stat.st_rdev)
command = ['/bin/mknod', devpath, 'b', str(major), str(minor)]
retcode, out, err = utils.execCmd(command, sudo=True, raw=True)<|fim▁hole|>
mode = '660'
command = ['/bin/chmod', mode, devpath]
retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
if retcode != 0:
sys.stderr.write('directlun: error chmod %s to %s, err = %s\n' % (devpath, mode, err))
sys.exit(2)
group = grp.getgrnam('qemu')
gid = group.gr_gid
user = pwd.getpwnam('qemu')
uid = user.pw_uid
owner = str(uid) + ':' + str(gid)
command = ['/bin/chown', owner, devpath]
retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
if retcode != 0:
sys.stderr.write('directlun: error chown %s to %s, err = %s\n' % (devpath, owner, err))
sys.exit(2)
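# The 'directlun' custom property is expected to look roughly like
#     directlun=<lun-wwid>[:opt[;opt...]][,<lun-wwid>[:...]]...
# For illustration only (hypothetical WWIDs):
#     directlun=36006048c5fa001,36006048c5fa002:someopt
# Each listed LUN is cloned from /dev/mapper into a per-VM node under /dev/directlun.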
if os.environ.has_key('directlun'):
try:
luns = os.environ['directlun']
domxml = hooking.read_domxml()
createdirectory(DEV_DIRECTLUN_PATH)
for lun in luns.split(','):
try:
lun, options = lun.split(':')
except ValueError:
options = ''
options = options.split(';')
srcpath = DEV_MAPPER_PATH + '/' + lun
if not os.path.exists(srcpath):
sys.stderr.write('directlun before_vm_migration_destination: device not found %s\n' % srcpath)
sys.exit(2)
uuid = domxml.getElementsByTagName('uuid')[0]
uuid = uuid.childNodes[0].nodeValue
devpath = DEV_DIRECTLUN_PATH + '/' + lun + '-' + uuid
cloneDeviceNode(srcpath, devpath)
hooking.write_domxml(domxml)
except:
sys.stderr.write('directlun before_vm_migration_destination: [unexpected error]: %s\n' % traceback.format_exc())
sys.exit(2)<|fim▁end|> | if retcode != 0:
sys.stderr.write('directlun: error mknod %s, err = %s\n' % (devpath, err))
sys.exit(2) |
<|file_name|>static.js<|end_file_name|><|fim▁begin|>// Here will be compiled design document<|fim▁hole|>DESIGNS=false;<|fim▁end|> | |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Computed values.
use self::transform::DirectionVector;
use super::animated::ToAnimatedValue;
use super::generics::grid::GridTemplateComponent as GenericGridTemplateComponent;
use super::generics::grid::ImplicitGridTracks as GenericImplicitGridTracks;
use super::generics::grid::{GenericGridLine, GenericTrackBreadth};
use super::generics::grid::{GenericTrackSize, TrackList as GenericTrackList};
use super::generics::transform::IsParallelTo;
use super::generics::{self, GreaterThanOrEqualToOne, NonNegative, ZeroToOne};
use super::specified;
use super::{CSSFloat, CSSInteger};
use crate::context::QuirksMode;
use crate::font_metrics::{get_metrics_provider_for_product, FontMetricsProvider};
use crate::media_queries::Device;
#[cfg(feature = "gecko")]
use crate::properties;
use crate::properties::{ComputedValues, LonghandId, StyleBuilder};
use crate::rule_cache::RuleCacheConditions;
use crate::{ArcSlice, Atom, One};
use euclid::{default, Point2D, Rect, Size2D};
use servo_arc::Arc;
use std::cell::RefCell;
use std::cmp;
use std::f32;
use std::ops::{Add, Sub};
#[cfg(feature = "gecko")]
pub use self::align::{
AlignContent, AlignItems, AlignTracks, JustifyContent, JustifyItems, JustifyTracks,
SelfAlignment,
};
#[cfg(feature = "gecko")]
pub use self::align::{AlignSelf, JustifySelf};
pub use self::angle::Angle;
pub use self::background::{BackgroundRepeat, BackgroundSize};
pub use self::basic_shape::FillRule;
pub use self::border::{BorderCornerRadius, BorderRadius, BorderSpacing};
pub use self::border::{BorderImageRepeat, BorderImageSideWidth};
pub use self::border::{BorderImageSlice, BorderImageWidth};
pub use self::box_::{AnimationIterationCount, AnimationName, Contain};
pub use self::box_::{Appearance, BreakBetween, BreakWithin, Clear, Float};
pub use self::box_::{Display, Overflow, OverflowAnchor, TransitionProperty};
pub use self::box_::{OverflowClipBox, OverscrollBehavior, Perspective, Resize};
pub use self::box_::{ScrollSnapAlign, ScrollSnapAxis, ScrollSnapStrictness, ScrollSnapType};
pub use self::box_::{TouchAction, VerticalAlign, WillChange};
pub use self::color::{Color, ColorOrAuto, ColorPropertyValue};
pub use self::column::ColumnCount;
pub use self::counters::{Content, ContentItem, CounterIncrement, CounterSetOrReset};
pub use self::easing::TimingFunction;
pub use self::effects::{BoxShadow, Filter, SimpleShadow};
pub use self::flex::FlexBasis;
pub use self::font::{FontFamily, FontLanguageOverride, FontStyle};
pub use self::font::{FontFeatureSettings, FontVariantLigatures, FontVariantNumeric};
pub use self::font::{FontSize, FontSizeAdjust, FontStretch, FontSynthesis};
pub use self::font::{FontVariantAlternates, FontWeight};
pub use self::font::{FontVariantEastAsian, FontVariationSettings};
pub use self::font::{MathDepth, MozScriptMinSize, MozScriptSizeMultiplier, XLang, XTextZoom};
pub use self::image::{Gradient, Image, LineDirection, MozImageRect};
pub use self::length::{CSSPixelLength, ExtremumLength, NonNegativeLength};
pub use self::length::{Length, LengthOrNumber, LengthPercentage, NonNegativeLengthOrNumber};
pub use self::length::{LengthOrAuto, LengthPercentageOrAuto, MaxSize, Size};
pub use self::length::{NonNegativeLengthPercentage, NonNegativeLengthPercentageOrAuto};
#[cfg(feature = "gecko")]
pub use self::list::ListStyleType;
pub use self::list::MozListReversed;
pub use self::list::Quotes;
pub use self::motion::{OffsetPath, OffsetRotate};
pub use self::outline::OutlineStyle;
pub use self::page::{Orientation, PageSize, PaperSize};
pub use self::percentage::{NonNegativePercentage, Percentage};
pub use self::position::AspectRatio;
pub use self::position::{
GridAutoFlow, GridTemplateAreas, MasonryAutoFlow, Position, PositionOrAuto, ZIndex,
};
pub use self::ratio::Ratio;
pub use self::rect::NonNegativeLengthOrNumberRect;
pub use self::resolution::Resolution;
pub use self::svg::MozContextProperties;
pub use self::svg::{SVGLength, SVGOpacity, SVGPaint, SVGPaintKind};
pub use self::svg::{SVGPaintOrder, SVGStrokeDashArray, SVGWidth};
pub use self::text::TextUnderlinePosition;
pub use self::text::{InitialLetter, LetterSpacing, LineBreak, LineHeight};
pub use self::text::{OverflowWrap, TextOverflow, WordBreak, WordSpacing};
pub use self::text::{TextAlign, TextAlignLast, TextEmphasisPosition, TextEmphasisStyle};
pub use self::text::{TextDecorationLength, TextDecorationSkipInk};
pub use self::time::Time;
pub use self::transform::{Rotate, Scale, Transform, TransformOperation};
pub use self::transform::{TransformOrigin, TransformStyle, Translate};
#[cfg(feature = "gecko")]
pub use self::ui::CursorImage;
pub use self::ui::{Cursor, MozForceBrokenImageIcon, UserSelect};
pub use super::specified::TextTransform;
pub use super::specified::{BorderStyle, TextDecorationLine};
pub use super::{Auto, Either, None_};
pub use app_units::Au;
#[cfg(feature = "gecko")]
pub mod align;
pub mod angle;
pub mod background;
pub mod basic_shape;
pub mod border;
#[path = "box.rs"]
pub mod box_;
pub mod color;
pub mod column;
pub mod counters;
pub mod easing;
pub mod effects;
pub mod flex;
pub mod font;
pub mod image;
pub mod length;
pub mod length_percentage;
pub mod list;
pub mod motion;
pub mod outline;
pub mod page;
pub mod percentage;
pub mod position;
pub mod ratio;
pub mod rect;
pub mod resolution;
pub mod svg;
pub mod table;
pub mod text;
pub mod time;
pub mod transform;
pub mod ui;
pub mod url;
/// A `Context` is all the data a specified value could ever need to compute
/// itself and be transformed to a computed value.
pub struct Context<'a> {
/// Values accessed through this need to be in the properties "computed
/// early": color, text-decoration, font-size, display, position, float,
/// border-*-style, outline-style, font-family, writing-mode...
pub builder: StyleBuilder<'a>,
/// A cached computed system font value, for use by gecko.
///
/// See properties/longhands/font.mako.rs
#[cfg(feature = "gecko")]
pub cached_system_font: Option<properties::longhands::system_font::ComputedSystemFont>,
/// A dummy option for servo so initializing a computed::Context isn't
/// painful.
///
/// TODO(emilio): Make constructors for Context, and drop this.
#[cfg(feature = "servo")]
pub cached_system_font: Option<()>,
/// A font metrics provider, used to access font metrics to implement
/// font-relative units.
pub font_metrics_provider: &'a dyn FontMetricsProvider,
/// Whether or not we are computing the media list in a media query
pub in_media_query: bool,
/// The quirks mode of this context.
pub quirks_mode: QuirksMode,
/// Whether this computation is being done for a SMIL animation.
///
/// This is used to allow certain properties to generate out-of-range
/// values, which SMIL allows.
pub for_smil_animation: bool,
/// The property we are computing a value for, if it is a non-inherited
/// property. None if we are computed a value for an inherited property
/// or not computing for a property at all (e.g. in a media query
/// evaluation).
pub for_non_inherited_property: Option<LonghandId>,
/// The conditions to cache a rule node on the rule cache.
///
/// FIXME(emilio): Drop the refcell.
pub rule_cache_conditions: RefCell<&'a mut RuleCacheConditions>,
}
impl<'a> Context<'a> {
/// Creates a suitable context for media query evaluation, in which
/// font-relative units compute against the system_font, and executes `f`
/// with it.
pub fn for_media_query_evaluation<F, R>(device: &Device, quirks_mode: QuirksMode, f: F) -> R
where
F: FnOnce(&Context) -> R,
{
let mut conditions = RuleCacheConditions::default();
let provider = get_metrics_provider_for_product();
let context = Context {
builder: StyleBuilder::for_inheritance(device, None, None),
font_metrics_provider: &provider,
cached_system_font: None,
in_media_query: true,
quirks_mode,
for_smil_animation: false,
for_non_inherited_property: None,
rule_cache_conditions: RefCell::new(&mut conditions),
};
f(&context)
}
/// The current device.
pub fn device(&self) -> &Device {
self.builder.device
}
/// The current viewport size, used to resolve viewport units.
pub fn viewport_size_for_viewport_unit_resolution(&self) -> default::Size2D<Au> {
self.builder
.device
.au_viewport_size_for_viewport_unit_resolution()
}
/// The default computed style we're getting our reset style from.
pub fn default_style(&self) -> &ComputedValues {
self.builder.default_style()
}
/// The current style.
pub fn style(&self) -> &StyleBuilder {
&self.builder
}
/// Apply text-zoom if enabled.
#[cfg(feature = "gecko")]
pub fn maybe_zoom_text(&self, size: CSSPixelLength) -> CSSPixelLength {
// We disable zoom for <svg:text> by unsetting the
// -x-text-zoom property, which leads to a false value
// in mAllowZoomAndMinSize
if self.style().get_font().gecko.mAllowZoomAndMinSize {
self.device().zoom_text(size)
} else {
size
}
}
/// (Servo doesn't do text-zoom)
#[cfg(feature = "servo")]
pub fn maybe_zoom_text(&self, size: CSSPixelLength) -> CSSPixelLength {
size
}
}
/// An iterator over a slice of computed values
#[derive(Clone)]
pub struct ComputedVecIter<'a, 'cx, 'cx_a: 'cx, S: ToComputedValue + 'a> {
cx: &'cx Context<'cx_a>,
values: &'a [S],
}
impl<'a, 'cx, 'cx_a: 'cx, S: ToComputedValue + 'a> ComputedVecIter<'a, 'cx, 'cx_a, S> {
/// Construct an iterator from a slice of specified values and a context
pub fn new(cx: &'cx Context<'cx_a>, values: &'a [S]) -> Self {
ComputedVecIter {
cx: cx,
values: values,
}
}
}
impl<'a, 'cx, 'cx_a: 'cx, S: ToComputedValue + 'a> ExactSizeIterator
for ComputedVecIter<'a, 'cx, 'cx_a, S>
{
fn len(&self) -> usize {
self.values.len()
}
}
impl<'a, 'cx, 'cx_a: 'cx, S: ToComputedValue + 'a> Iterator for ComputedVecIter<'a, 'cx, 'cx_a, S> {
type Item = S::ComputedValue;
fn next(&mut self) -> Option<Self::Item> {
if let Some((next, rest)) = self.values.split_first() {
let ret = next.to_computed_value(self.cx);
self.values = rest;
Some(ret)
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.values.len(), Some(self.values.len()))
}
}
/// A trait to represent the conversion between computed and specified values.
///
/// This trait is derivable with `#[derive(ToComputedValue)]`. The derived
/// implementation just calls `ToComputedValue::to_computed_value` on each field
/// of the passed value. The deriving code assumes that if the type isn't
/// generic, then the trait can be implemented as simple `Clone::clone` calls,
/// this means that a manual implementation with `ComputedValue = Self` is bogus
/// if it returns anything else than a clone.
pub trait ToComputedValue {
/// The computed value type we're going to be converted to.
type ComputedValue;
/// Convert a specified value to a computed value, using itself and the data
/// inside the `Context`.
fn to_computed_value(&self, context: &Context) -> Self::ComputedValue;
/// Convert a computed value to specified value form.
///
/// This will be used for recascading during animation.
    /// Values returned from `from_computed_value` should recompute to the same value.
fn from_computed_value(computed: &Self::ComputedValue) -> Self;
}
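// Illustrative sketch (not part of the original Servo sources): a manual impl for a
// hypothetical specified value, spelling out the round-trip contract noted above.
// Real implementations are normally derived with `#[derive(ToComputedValue)]`.
//
//     pub struct HypotheticalAlpha(CSSFloat);
//
//     impl ToComputedValue for HypotheticalAlpha {
//         type ComputedValue = CSSFloat;
//
//         fn to_computed_value(&self, _context: &Context) -> CSSFloat {
//             self.0.max(0.).min(1.) // resolve/clamp at computed-value time
//         }
//
//         fn from_computed_value(computed: &CSSFloat) -> Self {
//             // Must recompute to the same value when cascaded again.
//             HypotheticalAlpha(*computed)
//         }
//     }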
impl<A, B> ToComputedValue for (A, B)
where
A: ToComputedValue,
B: ToComputedValue,
{
type ComputedValue = (
<A as ToComputedValue>::ComputedValue,
<B as ToComputedValue>::ComputedValue,
);
#[inline]
fn to_computed_value(&self, context: &Context) -> Self::ComputedValue {
(
self.0.to_computed_value(context),
self.1.to_computed_value(context),
)
}
#[inline]
fn from_computed_value(computed: &Self::ComputedValue) -> Self {
(
A::from_computed_value(&computed.0),
B::from_computed_value(&computed.1),
)
}
}
impl<T> ToComputedValue for Option<T>
where
T: ToComputedValue,
{
type ComputedValue = Option<<T as ToComputedValue>::ComputedValue>;
#[inline]
fn to_computed_value(&self, context: &Context) -> Self::ComputedValue {
self.as_ref().map(|item| item.to_computed_value(context))
}
#[inline]
fn from_computed_value(computed: &Self::ComputedValue) -> Self {
computed.as_ref().map(T::from_computed_value)
}
}
impl<T> ToComputedValue for default::Size2D<T>
where
T: ToComputedValue,
{
type ComputedValue = default::Size2D<<T as ToComputedValue>::ComputedValue>;
#[inline]
fn to_computed_value(&self, context: &Context) -> Self::ComputedValue {
Size2D::new(
self.width.to_computed_value(context),
self.height.to_computed_value(context),
)
}
#[inline]
fn from_computed_value(computed: &Self::ComputedValue) -> Self {
Size2D::new(
T::from_computed_value(&computed.width),
T::from_computed_value(&computed.height),
)
}
}
impl<T> ToComputedValue for Vec<T>
where
T: ToComputedValue,
{
type ComputedValue = Vec<<T as ToComputedValue>::ComputedValue>;
#[inline]
fn to_computed_value(&self, context: &Context) -> Self::ComputedValue {
self.iter()
.map(|item| item.to_computed_value(context))
.collect()
}
#[inline]
fn from_computed_value(computed: &Self::ComputedValue) -> Self {
computed.iter().map(T::from_computed_value).collect()
}
}
impl<T> ToComputedValue for Box<T>
where
T: ToComputedValue,
{
type ComputedValue = Box<<T as ToComputedValue>::ComputedValue>;
#[inline]
fn to_computed_value(&self, context: &Context) -> Self::ComputedValue {
Box::new(T::to_computed_value(self, context))
}
#[inline]
fn from_computed_value(computed: &Self::ComputedValue) -> Self {
Box::new(T::from_computed_value(computed))
}
}
impl<T> ToComputedValue for Box<[T]>
where
T: ToComputedValue,
{
type ComputedValue = Box<[<T as ToComputedValue>::ComputedValue]>;
#[inline]
fn to_computed_value(&self, context: &Context) -> Self::ComputedValue {
self.iter()
.map(|item| item.to_computed_value(context))
.collect::<Vec<_>>()
.into_boxed_slice()
}
#[inline]
fn from_computed_value(computed: &Self::ComputedValue) -> Self {
computed
.iter()
.map(T::from_computed_value)
.collect::<Vec<_>>()
.into_boxed_slice()
}
}
impl<T> ToComputedValue for crate::OwnedSlice<T>
where
T: ToComputedValue,
{
type ComputedValue = crate::OwnedSlice<<T as ToComputedValue>::ComputedValue>;
#[inline]
fn to_computed_value(&self, context: &Context) -> Self::ComputedValue {
self.iter()
.map(|item| item.to_computed_value(context))
.collect()
}
#[inline]
fn from_computed_value(computed: &Self::ComputedValue) -> Self {
computed.iter().map(T::from_computed_value).collect()
}
}
// NOTE(emilio): This is implementable more generically, but it's unlikely
// what you want there, as it forces you to have an extra allocation.
//
// We could do that if needed, ideally with specialization for the case where
// ComputedValue = T. But we don't need it for now.
impl<T> ToComputedValue for Arc<T>
where
T: ToComputedValue<ComputedValue = T>,
{
type ComputedValue = Self;
#[inline]
fn to_computed_value(&self, _: &Context) -> Self {
self.clone()
}
#[inline]
fn from_computed_value(computed: &Self) -> Self {
computed.clone()
}
}
// Same caveat as above applies.
impl<T> ToComputedValue for ArcSlice<T>
where
T: ToComputedValue<ComputedValue = T>,
{
type ComputedValue = Self;
#[inline]
fn to_computed_value(&self, _: &Context) -> Self {
self.clone()
}
#[inline]
fn from_computed_value(computed: &Self) -> Self {
computed.clone()
}
}
trivial_to_computed_value!(());
trivial_to_computed_value!(bool);
trivial_to_computed_value!(f32);
trivial_to_computed_value!(i32);
trivial_to_computed_value!(u8);
trivial_to_computed_value!(u16);
trivial_to_computed_value!(u32);
trivial_to_computed_value!(usize);
trivial_to_computed_value!(Atom);
trivial_to_computed_value!(crate::values::AtomIdent);
#[cfg(feature = "servo")]
trivial_to_computed_value!(crate::Namespace);
#[cfg(feature = "servo")]
trivial_to_computed_value!(crate::Prefix);
trivial_to_computed_value!(String);
trivial_to_computed_value!(Box<str>);
trivial_to_computed_value!(crate::OwnedStr);
trivial_to_computed_value!(style_traits::values::specified::AllowedNumericType);
#[allow(missing_docs)]
#[derive(
Animate,
Clone,
ComputeSquaredDistance,
Copy,
Debug,
MallocSizeOf,
PartialEq,
ToAnimatedZero,
ToCss,
ToResolvedValue,
)]
#[repr(C, u8)]
pub enum AngleOrPercentage {
Percentage(Percentage),
Angle(Angle),
}
impl ToComputedValue for specified::AngleOrPercentage {
type ComputedValue = AngleOrPercentage;
#[inline]
fn to_computed_value(&self, context: &Context) -> AngleOrPercentage {
match *self {
specified::AngleOrPercentage::Percentage(percentage) => {
AngleOrPercentage::Percentage(percentage.to_computed_value(context))
},
specified::AngleOrPercentage::Angle(angle) => {
AngleOrPercentage::Angle(angle.to_computed_value(context))
},
}
}
#[inline]
fn from_computed_value(computed: &AngleOrPercentage) -> Self {
match *computed {
AngleOrPercentage::Percentage(percentage) => specified::AngleOrPercentage::Percentage(
ToComputedValue::from_computed_value(&percentage),
),
AngleOrPercentage::Angle(angle) => {
specified::AngleOrPercentage::Angle(ToComputedValue::from_computed_value(&angle))
},
}
}
}
/// A `<number>` value.
pub type Number = CSSFloat;
impl IsParallelTo for (Number, Number, Number) {
fn is_parallel_to(&self, vector: &DirectionVector) -> bool {
use euclid::approxeq::ApproxEq;
        // If a and b are parallel, the angle between them is 0deg, so
        // a x b = |a|*|b|*sin(0)*n = 0 * n, hence |a x b| == 0.
let self_vector = DirectionVector::new(self.0, self.1, self.2);
self_vector
.cross(*vector)
.square_length()
.approx_eq(&0.0f32)
}
}
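// Worked example (illustrative only): self = (2.0, 0.0, 0.0) against the direction
// vector (1.0, 0.0, 0.0) has cross product (0.0, 0.0, 0.0), squared length 0.0, so it
// is parallel; self = (1.0, 1.0, 0.0) gives (0.0, 0.0, -1.0), squared length 1.0, so not.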
/// A wrapper of Number, but the value >= 0.
pub type NonNegativeNumber = NonNegative<CSSFloat>;
impl ToAnimatedValue for NonNegativeNumber {
type AnimatedValue = CSSFloat;
#[inline]
fn to_animated_value(self) -> Self::AnimatedValue {
self.0
}
#[inline]
fn from_animated_value(animated: Self::AnimatedValue) -> Self {
animated.max(0.).into()
}
}
impl From<CSSFloat> for NonNegativeNumber {
#[inline]
fn from(number: CSSFloat) -> NonNegativeNumber {<|fim▁hole|> NonNegative::<CSSFloat>(number)
}
}
impl From<NonNegativeNumber> for CSSFloat {
#[inline]
fn from(number: NonNegativeNumber) -> CSSFloat {
number.0
}
}
impl One for NonNegativeNumber {
#[inline]
fn one() -> Self {
NonNegative(1.0)
}
#[inline]
fn is_one(&self) -> bool {
self.0 == 1.0
}
}
/// A wrapper of Number, but the value is between 0 and 1
pub type ZeroToOneNumber = ZeroToOne<CSSFloat>;
impl ToAnimatedValue for ZeroToOneNumber {
type AnimatedValue = CSSFloat;
#[inline]
fn to_animated_value(self) -> Self::AnimatedValue {
self.0
}
#[inline]
fn from_animated_value(animated: Self::AnimatedValue) -> Self {
Self(animated.max(0.).min(1.))
}
}
impl From<CSSFloat> for ZeroToOneNumber {
#[inline]
fn from(number: CSSFloat) -> Self {
Self(number)
}
}
/// A wrapper of Number, but the value >= 1.
pub type GreaterThanOrEqualToOneNumber = GreaterThanOrEqualToOne<CSSFloat>;
impl ToAnimatedValue for GreaterThanOrEqualToOneNumber {
type AnimatedValue = CSSFloat;
#[inline]
fn to_animated_value(self) -> Self::AnimatedValue {
self.0
}
#[inline]
fn from_animated_value(animated: Self::AnimatedValue) -> Self {
animated.max(1.).into()
}
}
impl From<CSSFloat> for GreaterThanOrEqualToOneNumber {
#[inline]
fn from(number: CSSFloat) -> GreaterThanOrEqualToOneNumber {
GreaterThanOrEqualToOne::<CSSFloat>(number)
}
}
impl From<GreaterThanOrEqualToOneNumber> for CSSFloat {
#[inline]
fn from(number: GreaterThanOrEqualToOneNumber) -> CSSFloat {
number.0
}
}
#[allow(missing_docs)]
#[derive(
Animate,
Clone,
ComputeSquaredDistance,
Copy,
Debug,
MallocSizeOf,
PartialEq,
ToAnimatedZero,
ToCss,
ToResolvedValue,
)]
#[repr(C, u8)]
pub enum NumberOrPercentage {
Percentage(Percentage),
Number(Number),
}
impl NumberOrPercentage {
fn clamp_to_non_negative(self) -> Self {
match self {
NumberOrPercentage::Percentage(p) => {
NumberOrPercentage::Percentage(p.clamp_to_non_negative())
},
NumberOrPercentage::Number(n) => NumberOrPercentage::Number(n.max(0.)),
}
}
}
impl ToComputedValue for specified::NumberOrPercentage {
type ComputedValue = NumberOrPercentage;
#[inline]
fn to_computed_value(&self, context: &Context) -> NumberOrPercentage {
match *self {
specified::NumberOrPercentage::Percentage(percentage) => {
NumberOrPercentage::Percentage(percentage.to_computed_value(context))
},
specified::NumberOrPercentage::Number(number) => {
NumberOrPercentage::Number(number.to_computed_value(context))
},
}
}
#[inline]
fn from_computed_value(computed: &NumberOrPercentage) -> Self {
match *computed {
NumberOrPercentage::Percentage(percentage) => {
specified::NumberOrPercentage::Percentage(ToComputedValue::from_computed_value(
&percentage,
))
},
NumberOrPercentage::Number(number) => {
specified::NumberOrPercentage::Number(ToComputedValue::from_computed_value(&number))
},
}
}
}
/// A non-negative <number-percentage>.
pub type NonNegativeNumberOrPercentage = NonNegative<NumberOrPercentage>;
impl NonNegativeNumberOrPercentage {
/// Returns the `100%` value.
#[inline]
pub fn hundred_percent() -> Self {
NonNegative(NumberOrPercentage::Percentage(Percentage::hundred()))
}
}
impl ToAnimatedValue for NonNegativeNumberOrPercentage {
type AnimatedValue = NumberOrPercentage;
#[inline]
fn to_animated_value(self) -> Self::AnimatedValue {
self.0
}
#[inline]
fn from_animated_value(animated: Self::AnimatedValue) -> Self {
NonNegative(animated.clamp_to_non_negative())
}
}
/// A type used for opacity.
pub type Opacity = CSSFloat;
/// A `<integer>` value.
pub type Integer = CSSInteger;
/// A wrapper of Integer, but only accepts a value >= 1.
pub type PositiveInteger = GreaterThanOrEqualToOne<CSSInteger>;
impl ToAnimatedValue for PositiveInteger {
type AnimatedValue = CSSInteger;
#[inline]
fn to_animated_value(self) -> Self::AnimatedValue {
self.0
}
#[inline]
fn from_animated_value(animated: Self::AnimatedValue) -> Self {
cmp::max(animated, 1).into()
}
}
impl From<CSSInteger> for PositiveInteger {
#[inline]
fn from(int: CSSInteger) -> PositiveInteger {
GreaterThanOrEqualToOne::<CSSInteger>(int)
}
}
/// A computed positive `<integer>` value or `none`.
pub type PositiveIntegerOrNone = Either<PositiveInteger, None_>;
/// rect(...) | auto
pub type ClipRect = generics::GenericClipRect<LengthOrAuto>;
/// rect(...) | auto
pub type ClipRectOrAuto = generics::GenericClipRectOrAuto<ClipRect>;
/// The computed value of a grid `<track-breadth>`
pub type TrackBreadth = GenericTrackBreadth<LengthPercentage>;
/// The computed value of a grid `<track-size>`
pub type TrackSize = GenericTrackSize<LengthPercentage>;
/// The computed value of a grid `<track-size>+`
pub type ImplicitGridTracks = GenericImplicitGridTracks<TrackSize>;
/// The computed value of a grid `<track-list>`
/// (could also be `<auto-track-list>` or `<explicit-track-list>`)
pub type TrackList = GenericTrackList<LengthPercentage, Integer>;
/// The computed value of a `<grid-line>`.
pub type GridLine = GenericGridLine<Integer>;
/// `<grid-template-rows> | <grid-template-columns>`
pub type GridTemplateComponent = GenericGridTemplateComponent<LengthPercentage, Integer>;
impl ClipRect {
/// Given a border box, resolves the clip rect against the border box
/// in the same space the border box is in
pub fn for_border_rect<T: Copy + From<Length> + Add<Output = T> + Sub<Output = T>, U>(
&self,
border_box: Rect<T, U>,
) -> Rect<T, U> {
fn extract_clip_component<T: From<Length>>(p: &LengthOrAuto, or: T) -> T {
match *p {
LengthOrAuto::Auto => or,
LengthOrAuto::LengthPercentage(ref length) => T::from(*length),
}
}
let clip_origin = Point2D::new(
From::from(self.left.auto_is(|| Length::new(0.))),
From::from(self.top.auto_is(|| Length::new(0.))),
);
let right = extract_clip_component(&self.right, border_box.size.width);
let bottom = extract_clip_component(&self.bottom, border_box.size.height);
let clip_size = Size2D::new(right - clip_origin.x, bottom - clip_origin.y);
Rect::new(clip_origin, clip_size).translate(border_box.origin.to_vector())
}
}<|fim▁end|> | |
<|file_name|>service_prpc_pb2.py<|end_file_name|><|fim▁begin|># Generated by the pRPC protocol buffer compiler plugin. DO NOT EDIT!
# source: service.proto
import base64
import zlib
from google.protobuf import descriptor_pb2
# Includes description of the service.proto and all of its transitive
# dependencies. Includes source code info.
FILE_DESCRIPTOR_SET = descriptor_pb2.FileDescriptorSet()
FILE_DESCRIPTOR_SET.ParseFromString(zlib.decompress(base64.b64decode(
'eJzlvX10ZFd1J0pVqaTSbbV0VN2222o3fV3+aKktVbvbxoY2xqOW5LZMd6unpIaYGSyuqq6kcp'
'fqirpVLcuBlWQyhI98vEcAY+eBSfgyMQFCgCQDi5dhArMWvIQk7zFkzcKTYQWcQAjmw7OAISHv'
'7d8++5x7bpXaNoTO/PG8Elq177n77LPPPvvsvc8++3r/UPB2x2HrQr0aljdbUTsqDtbqcTW6EL'
'a2x/y1KFprhEf4wUpn9UgtjKut+mY7aunGpX6v7yVRvVZqe2qWn62ElTDejJpxWJz1dpn29ai5'
'L+NnxncdK5U1zrLBWb6z3ghnLd7FsF1xXyuOeQWhL96X9XPjgxX7+9hJb3DWEFs87hUMCcWRsh'
'1EGfSN7XcA3YSWnnP3N/u9gupTz1H7Vcb7WKYwxD+Kxx7L+DPR5narvrbe9o/dePQWf2k99E+d'
'm5n3pzvt9agVl/3pRsPnBrHfCkFaWCt7/rk49KNVv71ej/046rSqoV+NaqFPP9dARjOs+Z1mLW'
'xRk9Cf3gyqQEyjIoIm/ZeErZgG7x8r3+hRg6DtV4OmvxL6qxG95Neb/Nap+Zm5M4tz/ipxsOx5'
'hUJW9YNs+jNXeI4q0N+HvalCP/29i/4eUZmxg75lmF8TPhCBwlBCMoTmhQy9UFCjXsS/soRgWG'
'XVTWPLvmEejbXdaTVjP/Ab9biNsRosfkAkBr4jKz1TbAYFQmKMZcMPGg3qXpkOiYRhGs2wA8kS'
'RKm9DiRHkIPqiHeo0EckFjHGsf0+Jhx8Jo6FG5vtbX8jjONgLeTR9THqIo1uyLuZfmXUZXrax6'
'73uwXjYhyit/i9gtrntfgXOLSPOHTz2Io/mwgvDbDZDurEJUeiY/CKRmtxToIBdRKE7U3DvEaD'
'ONQKmnG9Xb8AOjZDkpVmtc4UKNNnoZ97vcaBZAhyLbEkgeQIckzd5J0QSEaN0TsHx475i3bCWq'
'Ghxm8GxC6fVueFeo1kdGWbppKluuX0nCn0MRYXkifILpKZBIKeimrMgeQIckA9d6WfV/9N3lt/'
'K+c9o5IpjnSpjNJpb7RHpIrP9/qwEEjP5EjPXPsMeuYswBV+o/TVPm/PDk+LRa8P3GDNNVjhv4'
'v7vAFaq+dJnEgbAWx+Fp/reXaetvflWFU5kOIN3uhmZ6VRry47zTxqlq8o/WA2aXzIG9kKg/Nu'
'013cdBhgp+GMNyTivQz52dfHo/d7Rt898l3y1hK9VJz2BsNmZ0NjyF+Ef3PUohtLAa8JigGRoH'
'39jOBQDwIRt24c5j0aymB4f5sUIHaLAUZy3Q6zGDZq3SiS94q3eAORXmb7CrzhXLWjICzoNhXT'
'uDjvKa2ml6Gml+vN1WjfICM42DsQbjhD7eapWWU4Tv0uXu71x9u07u/fN8QSIr9K/6HfG3k2In'
'abl1/FKHm7e9Y80O+kmdj/EzJx2tvVDON2WNMSkXuWMuXpl3pFqu8nEqmf8UYsScukDNeMbB55'
'JkrKc+a9Cl6rDIep32SZeFEzjFZpeVUbJCc7c2kBTXq4FGlotVF8QSJqAxeRlNN6kfVI2zlv2N'
'gKMrJBJqL8jCOryGt6YLtb7s/iNZ4FLLNYeayFhgzwDMHGHvCG0+wp7vXycTtotVkK8xX9o6i8'
'HCkZ1nL5Cv4s/qtkwDke8PW9M5rC3D3usVu93akBPNuuS6/yLtsRNQnJ3k6z3myHrU2ySGjcuq'
't9fztwEZk757bWWCp7Or3Aw4OFrw2on6f/sqVP9Xt7d1ozOy5fWv4kwSthi5mUr8gvWhH5RrAS'
'Nmg1ZMaHj93wrFZl+RReqeg3iy/y+kRFA8PhZ4cBa6nC7xX3e4P4V8tGP9NcAAByAUObl0ktNF'
'ub/Q3BqoWrQafRXr4QNDohCzwJlgBfAljxoLdLr6o6vXM/a898RS+0eUDQ/X0xrWURTe4CAO7+'
'1m7FfWDn4fWsJdoqtTUhUx809o0SgkJlWIMXBFr6/azXx4plxNu1dM/ZueXZhXMnTs2pTHHY8x'
'hw56mF6SWVtb/nzyzdcrPK2RfOaUCf2+CmYypPAjukEcz/zNwstehPQ6jNQHG3N8iQEwsLp1TB'
'4lxcqsyfOakGLc6TlYVzZ5VnMZyeW1ycPjmndtkWJ+5ZmltUQymyqIvdtou5M+dOq+HiqLdbd2'
'GIGOkCEaUqIURjGU0BqEWxNOPlWQxJ3IdPTZ+YO7W8cHZpfuHM9CninYVV5v71ufnK3Czxz4Gd'
'nZteIliuVPX27qRQd1xCjixkLyILjKtbFkpfyXp7dthUduzkDi+vZVlvsxM77k4s2T1bLb/nmh'
'q5i5gaQNEjsC/vUf56f7zl2eyPDPvxNoH8DpvAbd5oD6JnrYx/MePtuxhznkElZlMq8bZuDl59'
'8UnometHM97lO5uUO9LwIq9/IySX3ZhVvXvXaX7cPdnylrvb5y5mF2pqeih9fda7bEfkOxJ6wP'
'Pqzc1OW5tOWhMPMoSVF7Rsp22f5/i5p0Hc4PkJoX1M6HMvMtIewbzRU9VGPWy2l+N2Kww26s01'
'3moKx/OrQSMOKyP68aJ5ije0b+i80Z96Qz+2b5TeOOjtcgzw4tXe0H3BhWDZOFWaE7sAOyuO1Y'
'3eXm5CY6SOqo0gjplpBW5axLMFPJoxT4rP8/bwGxu0N9U3G+Ey3LyYtxxL2ShanJYGoCgms/AA'
'v7YWNsNW0A6Xw1d2qO0yueTL60G8vm8vEJzI7stUrkTDk9JujptNN2t3UaPice9yxkIcoQEvV9'
'fD6vnlTnv1+fv2u/0zhYvcZgZNzlGL4qI3hMnYqD9ANEct3kOHd1BNDgfLC/LCafI/jucXz87N'
'zVZ2GSx3Ri0I1FpkGbxLC9RaZNhLzKpW9ZjJN7WxNpViVrV6UjcwMQNaD5clzHJfHO0ZZfer1O'
'Pmdu+LxVSPm9vdr93q7d1c3+x977D7XpGadL94HXvmrbBKc1Xbd4Xb3HlQLJP4V5fDZrBCEhO0'
'6I9430Fu3NdudciLqFbn+OE0Pyse9kajlfuqWiKXCc1q/f591zJ7R/CA5fEsg4sThDteD1qbrJ'
'Jjmoxw33W6qYafMWCsiHirvto2GA/pFcEwwTbuKXAi1fE4NxsmuNsvbQZomXQ6oQ03AiY93uxd'
'jkak6IJa0A6c1pPcGmw/LQ9TdLY6K9tWsKY0nYAZ0bpkxnnpuDfkyn1x0NOSTwYJGUEzC7MwX1'
'42R7YImVGn5pfmlivnzizNn55TOcewv7uvcL06VPqLrDec9tSKL/SuMGGVOGwvb9VbvCA3Ar05'
'WvnZK60Ww/ZLqc2d3KR4yjvYjEgBkOIIWrXlJKC1HFRJIONIb4QWy1XNaFEaJzvEtDTtEt/cxc'
'SXrOuNYJPkt93aZvu8UCkQYA6//0XcJOJmQQ3S/w4qr/TXOW/Itdfh/lR5x8qwTrvmaa378gy2'
'suP92jiu6DdhRkDYQm2MFCryq3jS678vZtz9jHun2J+D++5FRj549+LymYXK6elTFXm9eKXX1w'
'ge2E5vegx6tpNAGBCgS281DLqEi+GIl2d+FT1POKaeUyx4fTMLFSwIWgEaunx2fm6G1kTpeV6/'
'ZgIWi2UDvaR/Co6MeXru9Im5isqmp7pP5UsxrULHDv+Xccb/Y8bb5djVMIiCRiPaWg4a9SAW0f'
'AYNA3Is526f6Elklf9pUcynuo2bLvIzPyvJLP0tow3nLZmu8i7+n8peV/OertTNuyzpe6V3mi9'
'Fm5sRm0Ez5cb4YWwsa/ESqM3qJjqoTyfvHcKrx3fMz87d/rswtLcmZl7ls+defGZhZeeqah6V7'
'NLuOzPeqqbqOIV3k5k0cre442cWaA9kTbGuTvvnJtZWtRxD9t6KbXAS7+R8/bsQAmpce2xaCdq'
'6tlQX4bNcJZcSXFwyBYiLjXb9dU62fPaB9duzEgC1yGlSa+4GelDsGXCK43h1vRVlHky32zb1s'
'1wLehqDWWeqyjzxLYm+6UWdWDr6XbYOzKVXRpmm4gVn0S9hsgUY5hucsgbCdbWWkBuEGm/ZNiC'
'ueHY3V7B8AFbNThBphM721kEwprmIXVaj5eTIH6Wnhcqu+qxDYCWHiWDJX0IQb5LoRGRkOuTds'
'zN+DOcW5RPSfuKfXPs8xmvYMC03fZtBu11Rpc/kVWZCv8GnCzAJouAwPEb89oIgxo7PdHGBs1k'
'bOZV4DMCxllYuxXUG6m2fdxWmQe28XHvSoO3RlYoOVS15KV+Dm5cIQ1m5bl5t/QXGW/UuGk1y6'
'zTnhc0m1HbZVevKPe8V562L1UcBGMbnpc8uSjbaJ+SEyY+ptSOvadB8OcQflkJ1+pNiRvrHyb8'
'0mfDLyf+twx5bNFGN70nVFd0Ib4r87IXrdXb652VMrU/shY1guZacs7Kf1SnyJ+aWoucU9fbkj'
'9/kMm8M5s7efbEY9mxk7q7s4Y9lXC1EVYx5Lv//ImsN6gOqeeoXxlQGe+xkcIQ/yoe+9SQzy9U'
'o4Z/orO6iuP+KV+jOhT78Ch8VhjVdYSgfG1je6msixufLy/4881q2b9IwsV6u70ZHz9C4yA9GG'
'1SR4ZBGPumEDG1ook44nl+JazVsZRXOnxWj4P3ThwiuUISNgBZqTeD1jbTFU/6W8RNP2rxv1GH'
'6NyIaqSu9IKZ5HN06nmj3iapSc7QOdcBCRurEawSklMkBtTqOhuAXiI8Yfs4kYT/DncRxvkCbg'
'rJRiduI/0ikDSQYCW6gEfCMc8nUaSNe1Inn5jsDLfHZq2LHCTHNIL6Bg75L0IEdebwwhBBY6x1'
'iDBLh5cQ8s+iwzNJLrWo2sFaDswkHSH+R/Sk5ZOkkJ9Pm3zCap4geuj5LvV2UGfCOr8JxFC4IM'
'iVrWaUPGO+19uxx1kcjCoi6d0ItpGIQ5JCxEc+LUiChhAKImKD9mJf86SNpI8WbTg1f5UeeCYV'
'aLW9BTERCfLjzbAKCaK36hCsFmSnqaUojpl2z1+6a37RX1y4c+ml05U5n/4+W1l4Ce3cs/6Je+'
'jhnD+zcPaeyvzJu5b8uxZOzc5VFv3pM7MEPUOW/IlzSwuVRc8vTS/SqyV+Mn3mHn/uZ85W5hYX'
'/YWKP3/67Kl5wkboK9PkJs8tTvrzZ2ZOnZslP2DSJwz+mYUlzz81f5pc6Vl/aWGSu+19z1+40z'
'89V5m5i35On5gnz/se7vDO+aUz6OzOhYrnT/tnpytL8zPnTk1X/LPnKmcXFud8jGx2fnHm1DR5'
'6bNl6p/69OdeQpaJv3jX9KlT6YF6Ptk1cxVQ7w7TPzFHVE6fODWHrnics/MVMncwoOSvGWIeEX'
'hq0vMXz87NzNNfxA+yhIiieyYF6eLcvz5HreihPzt9evokjW78mbhCEzNzrjJ3GlQTKxbPnVhc'
'ml86tzTnn1xYmGVmL85VXjI/M7d4m39qYZEZdm5xjgiZnV6a5q4JB7GLntPfJ84tzjPj5s8szV'
'Uq5/jcZYJm+aXEGaJymt6dZQ4vnMFoIStzC5V7gBZ84BmY9F961xzBK2Aqc2sabICbN7PkNqMO'
'iYk0pGSc/pm5k6fmT5I1OYfHC0Dz0vnFuQmasPlFNJjnjkkGqNNzPGpMFNHl6b8d0Z3k+fTn7/'
'SnZ18yD8qlNUnA4ryIC7Nt5i7hedk79qWsJNsd98+TIoia/ypR7P74ixnkvyRo1YIJWucnAqxM'
'gkSkhEhhNXo3INrf6mukzrap+WLQvI9W9Mn1cCPYCtqT/t3h6qo/GwZQ56SfWNPEvAhJL0i0J9'
'bKidYztnKbJqb1Vbhab4qCs9l6epPm1oSLFgBZivWaCzYJfpz11YDVQRqM3JZ2YxtqJvB3SFDy'
'rBYJmtuiE5G4gi0UynI8LK+VbZuWNpGg0vw6eQ2tdjwhWYMTtFfvo78K6gb66xyAhV3yN6CT9N'
'ckQzPyN6BT9NdRhpq/8VeZ/rqVodfJ34Aeob+uZui18jegN9JfBxl6UP4G9Gb66yrv5zP096D+'
'MdZ2swk103gDIoaZSHcN+xLUKZLWyN3DVt6yJsrUCsTC84PGGslFe32DpCBqHmr7W1HrvF/rwK'
'D3V6KoTZtGsLlJv4g1DU4rfD5RcFxlxu5lCTCGBG0KG5s0JS2eOH380jNLi2HbpAACP8RDU+9p'
'UaC58MkHiG1eIfL3nq8K6kpvmH8hr/AFKqvGOYdOJz32McRzIP0E2aWe60AyBDkoeYEmVfIF6n'
'p1yDvKOY+305heRmO6xuY8IpETQ2qQ3+bKpZPweDsRtt97oU14fBGRsb80qcUXO+YkMbzBThZE'
'tkXcdEyXdisM3dTFPn7fheQJ4iYQYhgvUkV1eSqZ8UXqSjXmTdlkxjsIy3NLB3yW9dJqFBFF+K'
'e8ErToz7Bd7cpbvKMnb/GOnrzFO6jbK1N5i3eoq9QB71aBZNUJwnL12CH/jLEUZEJ5Yem8Sasg'
'HAKyRMCJFAFZIuBEigCM6gQRcJUDyRHkoPK9JYHk1CzEYmzW59QKTQLkS2cSJnQIWWJL2bxBbY'
'6xxeZQlyPqZlPU5Yi6WaJOOZAMQUZVyYGAmutIuF4tkD51krBcP7bRTR0Coc+ONtKSd9J61sbZ'
'FLsI0OUb9bWWVm5Rs7Fd9mcjWJmwxpxh9AkBLiRPEHcYWAUnaRi+A8kR5Bp1nfcCgeTV3YRlcm'
'yCXY52tDnFAaOUinc3AoeEPJFwd4qEPC3Tu4mEMQeSIch+4lsCyRHksLqBlz8g/erFhGXKtugn'
'vC9O4e3nNmb5a0iGIAdFaWhIjiA3kN42eAfUKcJSti0GCO+pFN4BwnuK8F7tQDIEKakJB5IjyC'
'TRZ/AW1OkU3gLhPZ3CWyC8pwnvQQeSIYjv4C0Q3tMpvINqIZXoPEh4F1J4BwnvAuG93IFkCHKF'
'w5lBwrugribZ/X5GQJ46R2iOjP1NhtQ7z2PYqCV52yalKLWv0rLpaIfO2hiOf0aSe0/UYT8hDl'
'ZD2r9b4QY8NJYTxLfI/NfdmK15PWjhcNxvdejpBjkvq51mVXdcb2+bxZNsgeRDTzHIpYqQN2nz'
'Y+OHOoV1I74w3CcS36jhJo97xMFzKQ56xMFzxMErHUiGIGPqsAPJEWSKZvgVAtml7oEGHjvL26'
'NOdbXKyNlG9OPOpqx9DpXpzPMSNztWYntL/7ip5FC6iyi9J0XpLlqa96R05i6i9B7SmfscSI4g'
'+0lpTxAkq15OW96racvbn9ryxKDj3HvZ6qB+X05b3RUsfFne6u7FVseos7J53WsJysrmda8lKC'
'ub171288rK5nUvb14Gb0a9grActi2wO70ihTdDU/IKu1iysju9ghbLdQ4kR5BxWj4Gb1YFdhFm'
'ZdMJUnihWIIUXlAT2EWYlU0nsItQ/64SlhtsC2wX1RTeHOGtWiWXle2iStNwvQMBngkat8Hbp2'
'pWyWVFf9dSePsIb80quazo75pVclnR3zVWchqCKzCrhOW8yiUQemuVNoF9nm8hmN811acOlIYQ'
'BGh04jpvinvdFkQR2gx1QfME3a2KXdAMQfdQH2lojqD7aVN3e86odcK6n3qeu3/nniET6z09w2'
'pZ7+k5w/j2kMyloTmCQu6KDjSr7iOsR1ItMRP39fQFWbmP+ip1QTMEvYZmMQ3NERTqwcxtXjVS'
'soiNsZGaW2yMDat2NCRDkDFHFrExNlKy2K+a0Nu2BTbGZgpvP7dxZRwbY5Nk/LADyRHEpXdAbc'
'J0sS2wMW6m8GJj3EzRi41xk+i92oHkCHItrdIPZoQ9GdUhNPer3Nj/kfE53w5a0gQw/Xaw5uus'
'uLjsV3aA8oYCW8cEn6DgeRuJEXyy5wU+pxRasypGlMtcgEoQSyPCyeFCHxexGsFm2bNLJcMUe6'
'QGfQvBUrnwNEvFWPYXUkJkrPsLKYE1Fv6F1FIxVv6F1FLRlv7W0ywVY9xv9fSMpbLV03OG8blL'
'xRj6WykVXVDbMAHtxMKe2U6JA+yZbRKHfQ4kQ5Arxa7TkBxBYNe9JiOgQfUqQnPdWCeZE20TcO'
'hx0t9ar1fXd5hzM+U7TS8CC2vEl6aObeJlbQqEiZxFzWpYTqiHHfWq1HgGiV+vSm1lsKNeRVvZ'
'QQeSI0hJXevtZrX7c7S3/lJGZXj/hKj/HO2fB7wK/4LQ/EKG+jg+Nq1daHi85KqTIRMaC4td4V'
'oUxvDJW2E1WmuST++vhy2S2sUwtK7KqMFJhDNWF9QPEHaJBJQB6KC6yQHlALpFvcD7twzKq9cA'
'z5Vjp/0ZTm+M2aVnI58sug7RuZFQ2UxWWmwveNr15VI6orHT6An/bpKPUQMgohg06oCyAF1GrZ'
'7H29/rM8TSJ4mlY9el7JXEbiRKAudmIyYCeyK9WKCVU+SfCF+8AYP77QxtgqMGRs0IOqB2eT9j'
'QZilX83Q6tk7NuPfCGPSigziKmGrhcuunr/Qwp1VerwV1lv6GXGAprJeJTO4FQYxDss97zIXM3'
'XIuEe6wFmAi2qPt8cBZ9Svoe2eVNuMAQ93gbMAj9LqfpUDzqo3MoqxNf8MrZ6X1ddeRoqUfEsy'
'z2tl3z8jZ8FWt7aD86F/9EZaX+2Q9C/f3nVy4P36KqlJ85Jjujbq58nA7xouhvDGXlqFLNDqDj'
'enfp3ZnmqLNfTrvRwj+QUYHHOH26fe8lMc7k3HfrzhQure0jtc2Nxv6R1uXj2Itpel2mLDZ7Dq'
'AmcB3kPMcVH0q7f2osDe/tZeFP2E4q29KAbUQ2hbTLXFNs7g3V3gLMCKlquLoqAe7p03qP6He+'
'cNMdaH9bz9bcaBD6pH9JL7C/I+g7WpWtiob9ThHtnkAVpyJ1tRZ5M9FKy7JJuF/SXsDolXRUpB'
'+09l/65oi7y/1qQOf9/kIezSCO1JWuzH5HqSLonb9UYDWwQyWxG3Y/XMm8sad7zF/iq7megRF5'
'ND+lM/lN0oiP1O83wz2moKpEtGsIU80isjg8SXRyAje7xjDthT70Dby0sH/FNhc629vjNjUqjg'
'qr6jd/496uEdmP/LvHEHvEu9UzN+Dy2OLbDtgr1xn8YLx/KdvZTvIrzv1JS7QjGk3tUrmkOE4l'
'29QjFEKN4FoUiL5m717l71t5tQvLtXNHcTindDNNNrbFi9B22vSLUdJhQMHu0CZwHeS5aQi2JE'
'vbcXxQiheG8vihFC8V6NYtIBK/Uo86J0BfRLnFJLOr7uIlGE+9FeJinC/ahmkot7VL3vx8A9Sr'
'jf14t7lHC/T+M222VGvR/b5e+622VGQwfIrjxiQdguP8AcGhu76HaZUGGM3Q+ktU5GdsEPYOtP'
'JkCbux9MT4CxWj/YiwK74Ad7UWTVh3pRAPOHelFIa6AYYSAG+HsZDnuMGgBtNL+XmFx9Ytb/Xo'
'atxQSUAQiRjwSUAwh2tUGeUR8BpqtsGxjvH0kjh+X+kQxHbRMQvzhKg0pAOYDGiFCDPKs+mqYc'
'W+RH08ixjXw0jRxUfRTIL3dAOYBA+TsyAsupP9T24q9m/PlV316ohBDEYVvSHRCKM6Y7QWmTpb'
'YrEWcn1CX1wbzpSQEG86492mqyj2iv9E367oVAeH3JhcFyMjSEZf4wPVrEZf4Qox1xQBmAUFAj'
'AfHYriAr9ONZgfWpPwIqf+x9WQ7Im4AZBoCb2rpqhRBej1OZFfibA2s8ePPE8/leWaxzNQL/UP'
'nQJKx/BF87jcb2FG7TIAcQpssCDjW36qiJMnPDDVMwQPy4GuGAzvNbnYYYJiYbg0z2mu3WH6+X'
'qe/VeivW0Vp9PV3qbIgNDbq9ZFQ8D0ELOWRclqOZtGPHhbbPSRwuY0OOGIhjLs+3VyYmnInoM9'
'xzQXmA3AUDrfNHWDD7HVAOoOeSy/ULRuzy6o8zXLdjk+ch8UKenvckR4gsy01esHQerPagr6LG'
'hbCW9iSDZpPMe9rOrXA640Hs5o/T48lrutzxwJj74wwX/khAOYAOkGf2USNY/epzQHXt2Du1YJ'
'Es4dKOkScbgU+F2ds0Ds46ImVrUoJksGymyqnRShQ1wgCsKeHmTglLpcTZwCVpoVM4u/uR68y6'
'Gzxhb3Acy5h8sGBTcwuH7VvB9oTpDEZ0F6IZ216TpZPWuKX/otv9o8eez6ImjRAcX5hdGNfZDR'
'PHdRLDFPkd2oa/I+E3wlyfS09BP03B59JTAGP4cxl22RNQDqCSusZ7rRGpAfX5DB9jXsD6ZP2D'
'8EEsRwu1kAP5gc9XuI2cuOfZNFWHYj8ppeDpozvfPUqp6zA7HA89VWQ78huOYCHI9vn0qAZoVJ'
'9P62fY55+Hfr7KAeUAwsnoj8yoCuoLelTfyPh3Ly6ccZaEIarMcQieGtHaiKz0HO2XRW150C/k'
'5wa6aeCX7DX2kkQGoBMS/JMyTH5EHOKekK2hLW1oq7Kr2upt5EKGyKTT2WI6LQ6o6AHTT/QR58'
'lKbbPqq+NAx6/Sk8ZMELuLFBGqL6R5WSBefiEtIXBUvpBWOghSfQFKx7cb6aD6Iq9R2wbhoi+m'
'kePc7YtAfoUDygC0zxE/RIy+qMXv9wcF5qlvZjgG9u5BZjSt1ESXBeLI+CVzBFcq+y+FQrZPrH'
'yZFmAk102qnkdKIHREFaGvtaBVo51Cjunxkhz3aIRd1QNYkya06BfMlrwSNiJIfmSXBZI5kE4E'
'7RT5UaNmyKtKMIlFwVLDyPnaEhuKaZmEXpbFFvvanQrAihKO1dbDdr1a0s8nJXeyhz4k95Be54'
'xSXnLjYVBdNyTZIeqX1kKkAtH/oyPbhe5houwvGogQFdMugqwBe2BvjiM5yqhJqunzeHODlnXl'
'9Nn5nZBZKwcRJXh2q21sPbRESuRgNmSknHrlGh7av5zsmTUTPKzCO9UdxyHtZxiRTi6exERhDp'
'pRc4o2kZCd6DRe6p9Uu8yRnTXrTWPdscOM9NQa8n5FW9qukKFeb0DStmi4JteU9cdWC9mhiTxj'
'VyJm8dFoPTYJYNSUzCAWjhbSx6Zop+KEJCd9SB+pMltIKcQ0KZw3sUXTDgXeM6buyaVRxtGkHy'
'JUjBPotXUxwDB3rfCVHXL54ehHPXxYkuUZct4qTda2pHPQ6G3aVTsJ8tKgDmFNhjSt9VUbwKy7'
'Z680CpRqIwNgkmQXlLBoyEyY+fS6BwHB4NjGsS65jo0UQlnrNdII1iZd8rYJO/Lhtu00egkStk'
'tfkS508QpHw+Lk/JtpJeiRhv1m2r5GQOKbsK8POqAcQIibX8uW4HcQ5f0fiPLuTUV5zfgQ1IU5'
'9R0d1B3hn/DMvpv4N3nxzL6bUJQXz+y7ic7Pi2f23cQzy4tn9t3EM8uzD/RUovPz4pk9lUaOY+'
'mnEp2fF8/sqUTn58Uze0rrfBzB96vvY7hvzNJwr3SH20wcCRkz7JfvZzjdbIR/Ysw/SMbcL2P+'
'QUJWv4z5B8mY+2XMP0jG3C9j/kEy5n4e8w8zfEpp2mDMP0wjx5h/COTXOCB+8Vo14YByAOGk0i'
'DPqn/McO6KaQOn6h/TyGEO/2OGs1cSUAagK+Q0o1+8UQIhf+UjfCbOZ9z/PkuoXptVubFHsjsc'
'LBq7WodxnSNAievudKyIO8T1rjNEzNGOB4hd54cIQYfmyoNWFkjFJ8Xb1qbN01a00l3Su9jJ8b'
'Y5ZfTjTnXdPGJdGWySLtxs1Wnxco6NOAVMsWTZ1Jvtm455pA42yGItG3brNABim0fidLUFQcZe'
'k93hdPMytwnNHTca6gLnAd4tUZUEnAEYB5xpcA5gnHC63WfUL2XliPNi3UMuf6m3e0RKfqm3+4'
'xGiVPONDgHMBbABzIiWjn1KxCjG3c+nL6oDKUfdMuSB6uV17Y875YnH05DoBP6LipbniNcZiUg'
'xMEEu6B+gHaJd9AvIQ4CHVA3OCAeaVkd8SIB9an/PcuO6L0JBQnRFz2PbYXajN/xyNXb6czV0N'
'BnunRBeYBczYWwAIGMD9cvYQECQaHeRD8G1JuzpFAfzvYk8mrKzSmh1q+iWuFE0WsFsfIHWOzf'
'krWqdUCE/C0JgQMi4G9JCBwQ4X5L1qrWARHst2Stah1goX4wawN9AyLCD6aRQ3wfzFpfb0BEl0'
'Am0DcgYksgE+gbYNX6UJazo00bzOZDaeRQrQ8B+X4HlAHoKtHJA6JaCYQM6esLqGPwNrD2t8Da'
'y1OsNbWSNTfhRr0N3Hwu01Rgbr494WZBuPn2hKaCcPPtCTcLws23J9wsCDffnnCzwHQ/kuV8BN'
'MG3HwkjRwb1SNA7jsgfvFqGl0CygGEnASDPKt+M8u5L6YNuPmbaeTg5m9mOfslAWUAQvpLAsoB'
'hPwXnOsMqneDm78Hbpa60u9wxVpHGdKchQ/57iynEYzwT3D2PQlnB4Wz70noGxTOvifh7KBw9j'
'0JZweFs+/RnP2NjMAy6rezHDZ4XYbUL2fz4/qHTuznSBprAg5u0MLm+M2OMTSyWslrI6dlp2J8'
'NqxG5minbW7RrcJNiHrTEQ29mOXfTg8Ua+a30wPN6DEUZbENyiwT6KCs9kGe5ceAqWTbYJYfSy'
'NHtPGxNHIw6DEgP+CAcgD5NPEGeU79DjBdZ9tAUf9OGjkU9e9kOWMmAWUAulJEdlAUNYGuIUP0'
'LgH1qQ8B061jt/rz5p54DK9Ce9q+LkEFx1IXdTJwc2knIaHP4HJBeYCMBT8oGphAyqEKGvhDmq'
'oEVADoOnWLAxoA6Eb1PEt7Xn14Z9p1faxe2gXeSzuCsB9O057X6F3a4TV8OE07grAfTtOeJ9o/'
'nKY9T7R/WNP+YSRBeeq/YO3+eU5ljp3xb//n/+f5UlHBO/bnu/05RCdsfnOSVq/vtGI7XQ8uWK'
'c5LvlBW9/wdteh59/HDqO9gx0727X2+fjiLC0x2pZNbKLmxw1cUyXjsVbHxTCaFuzciDUE2ybO'
'6dMW2o7ZHdc4YIGSr1/f7DTY+7dRQzNVsIFNIhEQ7ZxIFMQ9iUShf1hY4+CSUTptjQ2yHbIdIm'
'EifZGIwwR1aJc46nLD2ZLinKskXG4ssg0cCzTjNnnEOsLBmVlclp3TjMRpb6aITE5NqkGjweXs'
'e8tclN1QMGs7nk+rLOX4q9qK4phjP70s8F8a6pMY7btbbEhViPQs6CCtw6MtPrkJScfWG+la9J'
'NglZaOzYjo5U55DmNN2koYNj3NN51hQZxEGwc7VD+zHVNtqzLY7BqWlfRsy6Xh6noUhxzT01ee'
'4+Oef5jDOKahpowDxSZ1j70ym/GFKSb1QM5PQ66dk8DienPUWiOpfEBuohNKPoe+f5Psc076aJ'
'g+JpmRkmwoJD/vRvoPWNrrLQ4HvQD/mcMiOe3ajjr6xnPcaclCAKhmr+DoafBlZhk1U2GVmlBQ'
'fppRr8gVpnhd2M/D17ea+FaDxYaecEihrw212edr12Grh1MbiMGZQgdTa41oJWhM2RmcaoVruB'
'2+7Vwk5cFHxmZ30mltUu4iEmi2zVVzDJ2vfN8XkmPLxj9ftwSehRUAySuYmvE3G521enOCh5J6'
'ZStciettHFKS8XOB6EX+zYRc7GjhXKUZARkfJIPt9282WB9FW8x2rLUmPxHO0yI+F3c4JIfnLD'
'78ctRkXnUPqczXVHSqI7ywsGeepNwBEOEo2iJgsvT04tik025LeFLURdxZmUqlRPKRmF4RZnnH'
'+l4maT4tdnzRPxahDZCqhMXh/7hlHYBEQ6+JjJqS02dE3vmLFXx5h7cCrT+gEzAd9hMIRhARP+'
'xsimQEHSKfVleVORzEfIVYDn+MjGjTFZG//wLTdY++ZuSx7fol7NnXjD2RIY60ddD4bpp3X8rn'
'QWW1YLIQZtK9ySYlx0FEvcR+hTrSUY2gyhvhiW1zcjnpXLsxiLF+Yxxnr3SSkEy02oaaqzedKI'
'oNkKbet9HUJlLAMHY+vteysUJttoJWLTZBFjGStW3iiY3+pcRc8cRG/1JiXXpio38J1uVzHVAO'
'IAS7/lNWYBn1FaC6Yex3s/bEErqZzyWFc/b4zL1czRUXcLFXTnBFjXo+1xTV79raEY5Db44yWd'
'1iS4x0Dgcb80c9WvKv7NRpK9UXrPRZNanV0tQULZhlro/JX5gQQdFqQILW4zzjtPkZSpOjB30D'
'PNY+gWh8plFP4rZcL9sKuFuPdUO9WocpQ1tMJCF4PSoMY8KZEPgSX0lPCHyJr6QnJKN5XRRv0B'
'Nf4itZvrjz3pzAsuobQHXb2JtzMiH26CxsJjLOlpOINFdPoTlLjojotc2A/yyDY/putNYEsA6T'
'e5eiUyblrIA3Z1my2siyNop7y5rnsKO/shKHNvsDtgaJ9WEsdEkMqZN2q2neOXKhkzs4936nMr'
'SE3qZadr2pezEJlp6zqHF2K/alPunlajhrYTvxF8cnjHccINmaUOAosLntOQxxbJIuBsh1VDN3'
'cJ6+kZ50+CTfSJwGT3y8b8BpuMYB5QC6XmIsGlQAaEIdd0ADAN2sXsBZeh6/9iT6mxvbr1eQaE'
'qc0kGbrOssPYMAbuKTafqQmP9kmj64iU+CvikHxB3dqI45oAJAN6lZzoYTkG53s5rxvm60SZ/6'
'Hrp80dhfZp9GeI89vfRap8Bj22lLn2fcXw31eGGI8sEhAtbtNr5HBPW7EWh3RLaRgI8kzy3dOf'
'V8j7NEiJhXdvi8WKsAzlaRc2LPl1qlTnkDTVQtsoY1tbJSGTh3FsTu0zKJYqSb7TjpPN13bE4m'
'Ze6Q/9AkQ4R22ipyOLSVbc1T7rsBk0ausTrz22eY7YLyALnzC4f7e5jf6x1QDqAJCeF64nATaF'
'Ld7oAGALpVvdA7yyBcfPgh+vt/cS7yQt+W8rLbhJy27lQ9wrh6McHtGPS1CcI5QNv6vAVhZ/8H'
'BOSHSzfbXpJCCowHmMU5m4TCqgcNY6fr6L5FRT0wssEucBbgIbXbO+WAM+pHaFsc69dFDkpHOI'
'8/Kcu1sMk7iM37FgWotYrJ0LbYDL7dXeAswEh+d/vOqn/Kcv7tre6ga3BWIKgioKfq7dAWeeqS'
'DLcTjIXxqS4wd4Pc/RGZ6Lz6+Rz2GzvziIYwyHNA/QCZw0lPoiE/j+9gXeuAcgAdchQboiEEch'
'UboiEEgmJ7wqiNfvVadFga+3+yiSl3Muoy5GjJcg2oH8eQIyUU6dphkz1IxSR1S0qla2extTzF'<|fim▁hole|> 'J1YAykt5q4hwwyGk3Fn5quokZ+vXYxUczHP9k9qK1Q4j3sxZ1HemzypT7TE5R7coeED4CDFjWX'
'OxEcnuoujujt0JnmxpbOSdFJPc5tFRrLanAh4ixArSaEcPkYYHpLdTmq/eaLs5T2waiF9B6t4G'
'wWJ4kZcj6RRsw1HWqhRBp00ACZi71ioK/FpGxqnUdC3i0nxHTcjQ2Ziw+mpRSZiw/mUhsbDt0I'
'5BpWyFwkkGtYDZD+eTCtfwa05EL/GBVYUG9Ff8nmh4S/t6ZJQMLfW9Mk4KTqrSDhOgeUA2icDP'
'oExOhvoI00AQ0AdAspXUPCoHoorYWRFvhQmgRcI30oTQKOdB5KcwFpgQ+luTBIJDyU5sIg7mul'
'ueCph9FfQiaSch5Ok4CknIfTJMA1fxgkXOuAcgAdkiQODSoAdNgZoUckEOh5RNX3jE+/S70DHR'
'4b+2rGn49T9cCM0N/h+fpbfRD3SKtP8p7J0IfSb+NWlaQZVvlrlfU22if3u2xA29P+EL7bybaz'
'a+dztK/etnuEsT6Qm7VGXntYu800x/ue3wiDuO2mWvLtLmOUcE9mCNrsbKRcetTjeEea1SjI8Y'
'40q3Fx6h1g9ZgDygF0QI60NagAkK+OOqABgG5QN3r/zrB6SL0rx8cnr/T1Jxhik1bHZ4f8PQbJ'
'pzThix1LtZVT7ryntSI1XuuxD4Fs5oYbnGEP0bDflR72EA37Xelh82WvnD140aAcQNc4S2+Ihv'
'0uyP0tDmgAoKPqed5bzLB3q/ehw4mxX3SiRpGJLvpVcTP1FyBEt9X5NwdF2c9EfMR5xdtprF2W'
'SVkrVZ0QaNSpw4ndxIn3pTmxmzjxvvTWjDtr78PWfI0DygGE5X5KQMPq/cA0Pnabbz8twczvIf'
'M2Q0lsQi1ioTiUDRNl709TNkyUvT9NGa7CvR+UlRxQDiAUsXqzMfRG1IdznBjy77JOiM1fxIc4'
'3E2a1x1fqe0NvsH6nTFZ2pzURHND5h7W1aHyIe048XeA4yqOZkwRWp0/FZmpNbc54iPx9sZK1E'
'C8TTv8khTdTvy0WIuANu0mdbYjk2jPTyTxXR/4eE/Xje0l4ecIzh/TLB7B+WOaxbgq+OFcKqA3'
'gvPHnLqa5OEXjIAr9TE9+5uJfG+ubz5buUbTHjnxdpDnWZk7ZK7ik8DOeBSN52Pp8Sgaz8fS48'
'H1xI+lRUbReD6mReZTZjyj6uM5Tmn/UIadMWdaOOajixybezpa2EmB7TgOS7WXkN0z2fZR2mZn'
'i9u9boAlBZlpo66sLUZoiXD4Qf/qQbigPEAuP3Cl8uM5m52kQTmAkN36p4YfRfVHQFUe+8Q/gx'
'/muzSWMV7vfD4jY5JYsMsbzzLnWfGmiAtdad4UcaErzZsiLnSBN+MOKAfQDWrK+4zhzR71aa1e'
'PvJMvDGzipS8DvkLP7moSFb0TyQs3HWvyt1DPPl0mid7iCefTvNkD/Hk02l9sId48mmtD35OQH'
'vVZ3NcKKT5ExUK8exhU7potDEMSgsmi8CcPrlVRZgAGsxn04PZSw7/Z3O2qogGZQAyVUU0KAcQ'
'qoq8Xk9wXv1pjq+JPvDPLivyk49Lm8uoQULEmBokntQgYdCoA8oChBok+hRrUP0ZRjAsWAYJy5'
'+BEbvllUHG0gPKGhDSR3ap/zunnqN+vU9lGCusQoIU1OXel/L8GxG0J3Ich/1sHrsAu1jOuWZy'
'p+aoCSyhlVuWYDV1IdKELZ1tkFvYw6opWMjEwpU6l9ezwcsu7J6gJ/8RJ6VSLyE5665rKzGpva'
'urnxzHqfmh2MeNIw/RUvIi+doo4qer4RYOxcOg3WnhqBr7PWYaez/b7XwZodZVb9jelTFR/vD+'
'gCsDpzIJfNv8zijyf1bXPJe1f5GPWfm3M7dv020dEbwZE7AR3M9PXp1O6g6dxA94KDpvAmww5O'
'lrGLc5DI0lLZabulPl8c3ItPCzv88pd2bcCBew62+s+tt0vVc2bkxyy4okfMf6gChmv6j7ng93'
'ecLeBBEPyJzkasWok4/aW5wO0G7Vq7ZQP89+iKKLVYmU2M0ldXFQqw8WbtIoTyQaRYPyABmvYZ'
'dEfp+A13DIAeUAOiyRbw0qAGQi3xo0ABAi309mBJZRf4cO7xx7POPP1uPEXXLCPRKNM58o80s1'
'5+Cp5JvPlJm8Z2Ixl8FfJflsm4vb+ijBYDLZPObIlPWXPqAkQSLj2JZuFQNNljStmXrYus1vhl'
'sS+dHrLLgQ1Y0kyQmcQ2TJYTFONP8uzWKcaP5dmsUZzReljjigHEDHRI9rUAGgm9WcAxoA6A41'
'633HsDirvo0Oj47998T1N4viknn/zsr7MV1+8fi9Z+3yO4vFsAHnZd9Ocxnx+G+nuQzp+3bi9W'
'tQDqADsoFqUAGgg+TiJ6ABgA7T9LyrX2A59do+hdqib+yHNWNv0xlO65WbTsxwzMZgky+ybGtV'
'IhxEoHXTXLk0VwstRKtRH5AXvjjcxifSJn3+fg/+fBHgy1rsb/eP3uYlVkrNvQ7ZiKLzMRdLMu'
'iE4NPBJmcF8zf5jIZ2tbT5fl9aLyctgoYvZPnnw20hoqeJJVg8vdv9Y9Ls1fofqxTTBHWNzvPn'
'u0oGcV6kTjCAInQCJ3peDPm38xZuZXUFX0qBvg1oIXMYAnNTTy0IG+EmpOvY8fGMxqhLZcjFE8'
'1w59hz+uw8G098OainwBEfcpr8KS70jrzs+qpvb1frtbDzXVC+y7SwNHfcFJmWMLA1p7vK+tNG'
'xvkbxmxhqdL1cD3jbOuL54JAFJ3J86tvpALO+lxAHBezsUhiorvB4Oicl4kLygPkrkscnRNISd'
'6+BvH6Qlnz6xk0qF4PPLtLV3CiAw7slu1xJW0i2pzcxYbg64F+SHBpQ7AHlDWgqwX9GzT6IqNv'
'Bs1oOYiX0U2COYNGLprMTqCsAVVkLH3q1/p+mhUGGafB6oL6AdrlqDKcWRPooLOH4MyaQKbCIG'
'Kjb+y7VBUGd7F1T/iNdb9LrHsGjTqgLECw7mGXD6k395Fd/i1jlyNsSZCC2uu9N8u/YZc/3MfO'
'+5uzzFX+CGci/eaEk5P7brihO01CDPggSa72LlK5Q9J5yd/FFNk0sy2sDrImjO9jToQ8yypcxt'
'8y9kbayUVytNUHSVpMRLux1OoGNdgWQ6RckqnT2iZjJzyEUF6LY8J89bAW8s1oToLuwGY3h2JX'
'a6kYEhPv4URQNKgfIHOjaUhMPAKZ6kJDYuIR6ErZLIfExCPQfklxGRITj0DXq0kuU8WfiVC/gf'
'5+q0/KVJlPRxAUZaqutSBM4tv6UPlqbMQGTTa4SDkftdtWuMeEdt3gLMC7yQfc44Az6pE+mylg'
'gQZc6AJnAcZidVFk1W/22UJpFoibRH02DyABc2vkAfw3I5sZ9Sg4sH/s/8rKiueSCiIEktyhP+'
'WsnT+r4zdbqCSHTUhsS05pZv2GCzhwxKzP1iOwWorgQJX9SiAGCXVmsMPFwSecTMwE9XtNqack'
'oy3UYqmdDbmqGrRatLlygXgu28hblU3+a3SXwVtpRCtlf94Ur5jUu4g5s8QG0tbfeuH8QD4G1c'
'aiNqvl/FUzzamaZmQOJvWjaZGGSf1osqkMyZQ/ik3lcgeUAwgi/fa8wLLqo0B109gv53mu9Md2'
'bUaYhJnCJCd2kQ0pzTQbn5MLC5FUZJGaGe5+Cn/ffnyN+YH3brnZX+El3A7J/2jwdKzW7zd1oD'
'x/nB7dcvOk35F/Y/mXGzFA/ppAGR+nAqsZiP1irqfrwInI8By649FZXMal4okgKzHSdUlge9c5'
'Q0snk0GE15GwKmlWAVlIZCglRWukGowEw/3VRqRNd32rIekW0SPWnNt4aj/naz0KGQQ7xCk3kT'
'A7xMsFGs+aN86zks5/LpliX5yDEpKrX49azuUeVj56rjzffnWYb3mnLDf7KZC2DoOklLutFhDJ'
'ZLuZEDq3U+d2ExjHHzonH9kLZXIS1svzkANrQA2JY/PRtKxzcbg+G8AcEt1EoKLsskPi2Hy0j6'
'9xJ6ACQAckHXFIHBsCjaujVn1n1MfQ3//pqu+Mhg6QVp20IKjvP+jjJK8rJEbspDfoO4KXua0J'
'yR+kVbMpMfgHfZzEVXbAGfVxjftKxt0jqXEX9ox5Y3cXOAtwN/as+uRFsCfZ8i4a0PPJXuyCCN'
'i/OyQszanH+zgN/MtDJpvHuRe0Yl2yRvBAvbF9h++fCh7Ytjnc5sxXTKop8NFUUtc3XhC3kLo9'
'WyYVVKf1Op4nX1VjQ0X3Nqn1Up0LqEm7Q3FSRYy1r9wdF/qQyy2OgbZVdd6R3gB09FFWUwqrKP'
'JqW1/2SPAxsVJJj4ve6TCOCSdK5j+GH/C1Noy2y2dCjGu1FYb6BII9PVvHhg065AatoZRXC+zd'
'trfO0lWW2ra8ljFgzXUwm5bq2U0qdfABxzHurK2FsSmdlIqwBfwhOFh+9VBXKgvYtwSeFD2pel'
'xcrzpqSZjXURgr5KmfD0NdThBlBtYxFyQREk2QD6qksijrPWrJJDX7gU6NZYrlO0y4Ebcqx1oI'
'kTpnNzTLt3l8jimJ3lxGigPD+GRM4FwqJr7d2WlhGmCgQNRQvWYK35ex34fxnM7qLj3O16g0wb'
'dxhlbbZIKazoCNVTzGrmN09kaQ7pDFudpp6auSvJM1dDmlNEIIfb2Jkmd8pYrrCSEpWkp5aLEk'
'Lrpx5W7vvTfWUF0Pq+dteSJjvumbcR5vkDT/qbtHNEvAjOp8NCSIxXw8r9ctcn7HJ4xFl1rdHv'
'fdCnFbSAskF0qSuEF6KSKmzx/q5CWQosw5hmiGGDC+nWqu2EQmS3IHnOz4yD3JSCeI6vVdb+pi'
'WbL58XdY+aIHGDOJjSEMrOm52WltRjo/BozxzMqAEdPs3nElysvsjp+W356NydvKU235IFK97X'
'LcHI04eXvO3BhtmSaDUcsnWg/r6zCHhYw6f+03RQoHQQ9zev1h7+mapXWT0We6+lYqoLgOoxsX'
'QIXjjpWAMMvjaSsBYZbH0xYxwiyP99miqkMSZiHQFY7hgBsKj8NIPuyABgBCKaFvZATWp/66j4'
'PM/9XNL4M6u2QhZhP3j3+yALOvq6k+q5wyqRJuGNBnhuuC8gC5/IXt9Nd9Nrw8JNGWv+6z4WUN'
'KgBkwssaNAAQwssnBJRXT6C/ybGjP/6X5gxapJ8/kaY6rxG7VCP9/Im0VCD9/AlIxZUOqADQmB'
'z4aNAAQNeRoFQE1K++/lONdDFOGsXX06Po1x3tcriKVOuvJ5EuDcoBZCJdSIf7+0sW6RriSNff'
'J5GuIYl0/X0S6RqSSNff60jXcxk0qJ7UgccR+RJbzb+PjT5Bi3jmk0mIcUjimT2grAEhgLZbfQ'
'cBtO+bABqy3b6jA2gV/gnb/amf6lTtlljTU8lU7ZZY01PJVO0WP+CpZKp2S6zpqWSqkK/3vUs2'
'Vbt5qr6XTNVumarvJVO1W6bqe0lQclj9T/D0V/LCU+Tp/c8+vkhb5Z/g6Y9AtT9W0akC6ZwZkz'
'kQ4EgcAfhN/VFxCUDqzynxBzvbyVUpz5QOZrKGhck/Spg8LGe2P0pW9bAw+UdY1fsdUA4g1G3+'
'WkZgGfWLedbiX0y0uNS+uoTnhPqG5KXV4XzI7rANQSMeqwvKA+SyLaM5YlT4sASNCGRU+LCcwx'
'LIqPBhOYclEFT41QwaVK/JP+2BwjAv7dfk7ToelqXdA8oaUEU6y6rX5X+aa3dYAg2vS/MH+vx1'
'ebt2h8UPfl3ert1hCTQQyKxdZLT+cv5Srd1hXruE36zdYVm7DBp1QFmAzNodUb+Wp7X7G2btIg'
'GUIAV6/MMM/8bifZNeCl/rWgraVbzkC0L3c6nPzp2CdzL5I6JT3pRM/ojolDcli2NEdMqbksUx'
'IjrlTcniGJFDgjcli2NEDgnepBdHRUAZ9eBPVYRHZIk/mB4Fyo09mIjwiCzxBxMRHpEl/mAiws'
'gYfviSifAIi/DDiQiPiAg/nIjwiIjww4kIK/U2iPAHjQgj5/dtec5VezzHvyHCj+b5zoeT+JHc'
'Qb+E8iudXGrhNdegyt4xWqTwzo/7upKZrRVz1DdFYm46ZqqgJV8+0Wb0odi3hnTl7AyyDlZbtN'
'fiEJ6cxJeigE3UiNYgbfwJsYgcNPFcY+c7VRF55iS2jQthLGkEPsr68K02U8FXB3/4fhbX5l7h'
'22BoVgurdQnfmLO+sxJIAqITumaIiLeSRfpoIt5KFumjySJVskgfTRapkkX6aN7ebFGySAlkbr'
'YoWaQEws2WioAy6rGf6iJVskgfS48Ci/SxZJEqWaSPJYtUySJ9LFmkSIP/wCVbpIoX6QeSRapk'
'kX4gWaRKFukHkkU6qn4Xi/RPzSJFIvrvYpFe5v3XHP/GIv2EXqSPu9lZHGK7xMlZ6OPS52bJ/e'
'3/v63QUVmhn0hke1RW6CeSFToqK/QTyQodlRX6iWSFjsoK/USyQkdlhX5Cr9D/kWEYDtv/Izr8'
'dF7l0ul+ErOthVO6JMIUB87HUUcAkVSa47uWls5iTTeCZjWc0IJRCzc2I0TNJrnUXFOHu+7QbX'
'Fbusb3W7sjY0k09OTcEgRnRVcsoJ48IxI6nfjsOed50p0NzpoTh66DubMLi0uW0TqdgMY9oK7g'
'c3sNwtL6VF71qav4jMYCqS2Dr+gCZwFGwdYJB5xR/wlt95X26pQnXNGzVHopDBnTeE8XOAvw5d'
'TfCx1wVv0xty0dcrms62yaQoJc4EVPV5zuC4Tx+8NdYEaLzxgWRUgy6jMQiP+clzoVo6JzP5OW'
'S+jcz+Rt4dlRGQ+BrpLsjlHRuQQyBUxGxfch5P1yX3BUfJ//DCqu5Z1jlMn67E915xgVD+Wz6V'
'HAQ/lssnOMCqc+m+wco+KhfDbZOXBh6E8u2c4xyjvHnyQ7x6jsHH+S7ByjsnP8id45fhVbQ1F9'
'EVvH39HWMfaPWX/ahn3tkT3UVGDjCQlX7QGPZaJcJNVJ7TimD/RtfTMkqSioP15gLvnbNL7jx8'
'9KXUbc3+H7TLZMbBQ1TF3ZWJQtn+txKUMQOOt8d4Pvdcbl1CX4LhLqzdSXOvQbuladnHFo+hK0'
'x48LivEJraMIk/40TVezmWhzeykan5iQw00udMPL7JxbCtLWizTFJnWZNFyT+mKei/z/WZZ/o5'
'r94xCb/w5d+3Gd2eMWj0hVmEyOFLmIqNTIsXOpizWvSeEKnA/VovaUKTVVM7nq9Xg5KY5T119+'
'8eurq87bLsqmU2bSH6+FJBSm/I3+OhgmLCUJSFuLu5NFUathjmZg8mf9f1NajaLSpM7Refkk/V'
'4JWuWV4AGCgRgGvbJzv23iv9qhyPPxenlc3pkoo6Ws6KJUuieWevKFyKKtdP/foOpKrOoskFY/'
'g4e6wHmAd4sSTsAZgPeqA13gHMAoCux2mFF/BczXpNpCaf5Vb4cIGv2VXsppMCPBFbI0OAcw6s'
'SNMBij+zKk6IBwQY/sy4leK0rU9MvQa3scUAagvaJVijIaAiHFg7//V+ShfBWoDuH7f0vpc+6d'
'JXQSs7+1TlKH9cHZNmxsRudDqJKWh+1KlwTm8qtB7Nc6LZ2gJUd2c3LfR74IqNWC5A/LBwaToY'
'GtX02PFiz9at4mtBSFnV/N2xutRWElga6jfc2wMqueAKYJ2wZbxBNp5HyUkkYOLj0B5Nc6oBxA'
'KLhjkOfU3wDTuG2DQ7a/SSPHIdvf5G0mpQZlABqVi+UaxLhQvt0g71N/C0xJmz4D8hxQHiCXcp'
'ww/W3eXlTUoBxAroTl1dfytog2Awj519LI87qVSzkOgr4Gyg84oBxApoh2kTfcrwPT9bYNn8+k'
'kaMUztfTlPP5DCi/2gHlAEId9schvnvUd7ADfrefdsD7/LlmNdiMpYxxvalvhMntwY6kupsP7+'
'mcWanMh8wASWJDmfNG2FXl3N8KnMJH5Ki84qdZODqhhnPSQLjeUHDH9Ds6XvOaUf7N1fj7Faq6'
'P6Vowz8VScncelKDO/A366HOz0ijTQoz8qh5wLgE1SLVuxk1a1Ke0TnfTgpY20tQDlfrsdRslU'
'8rJd96oh/zs3P8DcGafHgvxBFs+rZnUnFASh/WN+rUK3BFDfuZLCmeOkmeAT4IJXfz9BDsjZSL'
'3hMEb6XWHD0y10te7fmnQr7SGEXnUT+Zy20nqdvJuBn706G6V+6q3Huv/Qf/d++9eBjIw5Uq/0'
'O88Fd9f2297sEftYWjbckrokfPp762E2+ShelzeSs//Z+7X/r+vwkm6xP0j3/zpH/jpH+M/td/'
'ObeDOt9ajxq9AyvLiytdL076N+NdvNgIVsIGuX8y+gn9SnWy1vPK88wr+iulmk3SPpxc7Wl/1L'
'TXZYaJn9J4bXK9p/FNtrGu0Dt+dMJ8lQdsmqJlYNgmeS726wM2R1qSptrk16/KV1QlJ4QLUPqu'
'0OvvREp96np7wrn/1zFJaboCIl9roWUm6c+xLt3u+4gy6HSrsFltRHG6RqtcCtS2GPKgXCHnbN'
'B2vZUUOObU6Op5f3wziuP6SsMWcufQiUlnSmw4p+i8NmO56LC+0CppQZZdWyjfreWLuWaPEUuJ'
'+1KyXOSQik0W5ppdTc2tMqbhtKHFCnHipdoLlejLMFRnA8cmHdhUsLX8cz+mw1+13Gyxm4+OdX'
'11O3wuOCefsvA3opijNtHKhXrUiQ1zzQdl9dhqJeFrsIZUMVOl2hQ2d2tyu9OQ/uQPPtyL6q1S'
'89+p+r3DqNOieijWy9sktumrVlxqWqQKmT/aBJe2WlY0RSIuznhCFOVdTdfGdBlofDqdgAgsKy'
'FthSxGYut1c0Zf5Y7Xg5Z2lbqqxptENV3tmt/hQd6t86l0Xliw04jdYcbRhmSPdbcEZuuoIoXV'
'N1/LYhRwAonaYIdF5JfWWlFnsyTuOStJLnMcaA2FkTkfAbArM/X1pqTIbiLRQJRsmHX94cy2UX'
'w6Ex9IpUZkvcUeMhm5NmHWfvqJGDWTFHDTnyfjSzpibTvLSOxi2rtXghWdIEuDr681OdDIZeM5'
'DktdRqbUjhMo0dV4cDF6EqY43wjRieVIpbO9VHWZOF9/6amKxLyk+i5fFRKTfI84IGyIuKB+gI'
'wDskccEALtlYTyPeKAEAgfcCkyCD7yI8D0nX7JHt8jbh5B4ea9bsDCYP78fj/5V5Nj3+13vzsi'
'3z9AGW+R5YtZceZGv3zNzbMM4E8fOJdZJHLtxKHlu45a40npQ05ZDLQKhRmp78/jE9o7UmDqPE'
'FdSV1NCVkmdhavcWeF4xYIoWeLgHfGm3hnvJW3US9R3Me1fm6EZVMbAZM8fhNtqEeO8HvmZm2Z'
'RzV+64S1J6gBUNoG2JfHk8fc4GiS/mkW9w5DTHWuK0S4LLyZqbT7cjd/Ui/f7t+MdOJmTzNNfy'
'/yY2nkO32vyDfXgY8J6h0/asRmSQ/6ozsag9xWrhcnGkNXXGCxSL5AbffdKPlwqERt5ldh20o2'
'lxaiRoDcUS2MPZOPmbei0bV5pw2+ZAcYtzVWreb3zHLRtp5RZnx/yZyfbtSrUSNqTsj1hj1OcI'
'XX4lAXOA+w+XLfHie4QuA9ElHf4wRXCIyIehpcAPgqdYN3RRpM/js9OKAOe3+ZdZ5k1Ce1WvhU'
'1txaXucP0ugoA5K9Q/0JkU7LmmvHpfB/gxTCpPxNQ+1sNCfxxcgaP0js30knxzmI4w4KOPDujq'
'9oW0QTk/yqxmO/doMzK7mcRgreKYKvZ4kv0skdveo2iUOSRQ6cWlI1SntQBZQPhK1oSh+xwICx'
'Wf6op8+7jRTUxw0BD6dokbZV5LyqVo9JE23XzfefO/oitjsTCL18sneWEX75ZO8sZ/REdM8ywj'
'Cf7J1lHAt8sneWMzzLn9Sz/NiQ8ySrngQpE2NvHrJfwFhkFxc76Ty5puloqa1a7awCUzU/4Np0'
'2/B/N8R4ZwOpnhznsZXA33DWOjpZSRIr4BdMSrv5jpNZRK4tgaIwXChb9lw+MujtFxc9aScOqx'
'2+zolmsa4pjLKFLH2ejrJ1vaX3fdteH9O2JVTPH9y2rgd3CoIaZEsTcBnfdwbdy5YgRoBLJNhQ'
'g7VWsLnOZNsGLJiaAM8waxynUjDUaARNfUejHU3oQwJ9v8Ksu7LeZi1uvjhjgtj4viaqfXUPJk'
'o4lmzQ2jZJ3BNb5WCBb0StJ6/IpSW3gtJt9uFG0DqPFaWPEI4cmdB+XMzfqQ7Z4RALU9vFhg+T'
'hoeQh7YUe2OhwReRSG7q8Xkv+faKQderhdmH5KrIEIwouYWrIxJku5EiORNuMU9YcuUqd3INnL'
'/qp7/eZL41k9qsuI6QDVzwfj4jw3cFF6F3z7/oYwTld0BLYL2j7rQ/rgQP0MObbntatA+YXqeb'
'4gqAEz1tngbHKzv3C45nwmRaOh9a76zQ2iC4NjkEwawsDCsn+pNprTXnq+EQeCMEyJloBXW+lW'
'NERFDpXn3zvvvh7pZWRSuNoHleC71ZDXLdWVuVjAYuTPmZyUuWln+svOOc6Ga3+8/Ts3LYP+EK'
'tuUWm4OH9bc9eNj+KRmrEe9YmhghFwOm7B8+8rSYxW2hN4lO1GKVF7oESz8kQm+xsyIZJ36ta/'
'hx18aFePmTvRsXIt1P9tuznwScAXivurYLnAMYIf49DjinvgXMh1NtEer/Vm+HCPd/q7dDhPy/'
'hQ6v6wIz7nE1keqwT30bmI+l2vYZ8FAXOA9wd4c4Bvg2OpzqAucAvlEd9b6NgPpe9f1+ZKwPqA'
'xyQ5JPm2pN29C+4np9k2a7vYWLRen7fzpogOpo6UC7+XLNdPLlCrtnu9WP4jiq1gN7BGk/1WV7'
'8dzIfZILYT4+w5Ywf+sDYpskystLqfo2OsiO2oc05oK60vtZ/gkn84f9XPT0Ptwsm7aZUWZvi3'
'XQgqMZsJ/D+3kz6uIEBxfNZuWZiI4pWUaLyCFGPOu9Ylf/MPGs94pn/UN41pc5oAxAl0tV5r1i'
'SxMIn8MsMgie9T8A0y8PiGe9Vzzrf4Bnvdd7c8bCMOh/0ib0z7mONWcap/fb7iMUdxjOxxE541'
'4fFrAxYGJhQbfNVjaHGGUdJmP5tFQRN/4pEfIEnAfY2J8JOAOwsT8TcA5gY38m4ALAxv50wPhO'
'h7Y/l5wHGfULA0TJtWN3dHOI5Ynr92t/zHw5a0dOdY0QFjbjHeoC5wE2yzgBMxV71cEucA5gfE'
'j7VQ44q14DzAfG1ropZodFmx6rCMTR3OJTOTa8mZZl+aABr//krolzyKuzMLpGBr31mt6RQQW/'
'ZqBn7sBfAptPyyfgHMA4sP6yK6459Qag3j/2uUyPvEoK5LMZma/vWz/NyBiLLnMVNs1v56ga2m'
'kziNuO047MvwvwvPjrQ+Py5VJd8sQ43Kw0bmeUU9oSnOhiHzaUN/SyDxvKG3rZhw3lDWDf5V1g'
'ZtSVauz/A3VwEtY=')))
_INDEX = {
f.name: {
'descriptor': f,
'services': {s.name: s for s in f.service},
}
for f in FILE_DESCRIPTOR_SET.file
}
DiscoveryServiceDescription = {
'file_descriptor_set': FILE_DESCRIPTOR_SET,
'file_descriptor': _INDEX[u'service.proto']['descriptor'],
'service_descriptor': _INDEX[u'service.proto']['services'][u'Discovery'],
}<|fim▁end|> | 'iU2odeLmAVpLkE/qfVSjm0TsyVTNKutXnYQk9y0y99paflKbK28xgoYP3nbG0k1L6mVz/92qJ0'
'dlINPttemZRabba3MpOwUnxQQyMU8NygF0UF3t/WWfwAbUgyw3Y5/p8xf1vQcpI558tS4VGsI9'
'LxgfqJjXIS7c4fslKStesq/ovF3ONDBVvKCum9tY7uQpwLfwK2dn/Hib7IsNHbLa5peSnrhQCP' |
<|file_name|>projection.py<|end_file_name|><|fim▁begin|>import random
from firedrake import *
from firedrake_adjoint import *
import sys
mesh = UnitSquareMesh(4, 4)
V3 = FunctionSpace(mesh, "CG", 3)
V2 = FunctionSpace(mesh, "CG", 2)
firedrake.parameters["adjoint"]["record_all"] = True
def main(ic, annotate=False):
soln = project(ic, V2, annotate=annotate)
return soln
if __name__ == "__main__":
ic = project(Expression("x[0]*(x[0]-1)*x[1]*(x[1]-1)"), V3)
soln = main(ic, annotate=True)
adj_html("projection_forward.html", "forward")
assert replay_dolfin(tol=1e-12, stop=True)
J = Functional(soln*soln*dx*dt[FINISH_TIME])<|fim▁hole|> Jic = assemble(soln*soln*dx)
dJdic = compute_gradient(J, InitialConditionParameter(ic), forget=False)
def J(ic):
soln = main(ic, annotate=False)
return assemble(soln*soln*dx)
minconv = taylor_test(J, InitialConditionParameter(ic), Jic, dJdic)
if minconv < 1.9:
sys.exit(1)<|fim▁end|> | |
<|file_name|>irbuilder.py<|end_file_name|><|fim▁begin|>__author__ = 'sarangis'
from src.ir.function import *
from src.ir.module import *
from src.ir.instructions import *
BINARY_OPERATORS = {
'+': lambda x, y: x + y,
'-': lambda x, y: x - y,
'*': lambda x, y: x * y,
'**': lambda x, y: x ** y,
'/': lambda x, y: x / y,
'//': lambda x, y: x // y,
'<<': lambda x, y: x << y,
'>>': lambda x, y: x >> y,
'%': lambda x, y: x % type(x)(y),
'&': lambda x, y: x & y,
'|': lambda x, y: x | y,
'^': lambda x, y: x ^ y,
}
class IRBuilder:
""" The main builder to be used for creating instructions. This has to be used to insert / create / modify instructions
This class will have to support all the other class creating it.
"""
def __init__(self, current_module = None, context=None):
self.__module = current_module
self.__insertion_point = None
self.__insertion_point_idx = 0
self.__orphaned_instructions = []
self.__context = context
self.__current_bb = None
@property
def module(self):
return self.__module
@module.setter
def module(self, mod):
self.__module = mod
@property
def context(self):
return self.__context
@context.setter
def context(self, ctx):
self.__context = ctx
def get_current_bb(self):
assert self.__current_bb is not None
return self.__current_bb
def insert_after(self, ip):
if isinstance(ip, BasicBlock):
self.__insertion_point = ip
self.__insertion_point_idx = 0
self.__current_bb = ip
elif isinstance(ip, Instruction):
self.__insertion_point = ip
self.__insertion_point_idx = ip.parent.find_instruction_idx(ip)
if self.__insertion_point_idx is None:
raise InvalidInstructionException("Count not find instruction in its parent basic block")
else:
self.__insertion_point_idx += 1
else:
raise InvalidTypeException("Expected either Basic Block or Instruction")
def insert_before(self, ip):
if isinstance(ip, BasicBlock):
self.__insertion_point = ip
self.__insertion_point_idx = -1
self.__current_bb = ip
elif isinstance(ip, Instruction):
self.__insertion_point = ip
self.__insertion_point_idx = ip.parent.find_instruction_idx(ip)
            if self.__insertion_point_idx is None:
                raise InvalidInstructionException("Could not find instruction in its parent basic block")
elif self.__insertion_point_idx == 0:
self.__insertion_point_idx = 0
else:
self.__insertion_point_idx -= 1
else:
raise InvalidTypeException("Expected either Basic Block or Instruction")
def __add_instruction(self, inst):
if self.__insertion_point_idx == -1:
# This is an orphaned instruction
self.__orphaned_instructions.append(inst)
elif isinstance(self.__insertion_point, BasicBlock):
self.__insertion_point.instructions.append(inst)
self.__insertion_point = inst
elif isinstance(self.__insertion_point, Instruction):
bb = self.__insertion_point.parent
bb.instructions.insert(self.__insertion_point_idx + 1, inst)
self.__insertion_point_idx += 1
self.__insertion_point = inst
else:
raise Exception("Could not add instruction")
def const_fold_binary_op(self, lhs, rhs, op):
return None
# if isinstance(lhs, Number) and isinstance(rhs, Number):
# lhs = lhs.number
# rhs = rhs.number
# result = BINARY_OPERATORS[op](lhs, rhs)
# return Number(result)
# else:
# return None
def create_function(self, name, args):
f = Function(name, args)
self.__module.functions[name] = f
return f
def set_entry_point(self, function):
self.__module.entry_point = function
def create_global(self, name, initializer):
g = Global(name, initializer)
self.__module.add_global(g)
def create_basic_block(self, name, parent):
bb = BasicBlock(name, parent)
return bb
def create_return(self, value = None, name=None):
ret_inst = ReturnInstruction(value)
self.__add_instruction(ret_inst)
def create_branch(self, bb, name=None):
if not isinstance(bb, BasicBlock):
raise InvalidTypeException("Expected a Basic Block")
branch_inst = BranchInstruction(bb, self.__current_bb, name)
self.__add_instruction(branch_inst)
return branch_inst
def create_cond_branch(self, cmp_inst, value, bb_true, bb_false, name=None):
cond_branch = ConditionalBranchInstruction(cmp_inst, value, bb_true, bb_false, self.__current_bb, name)
self.__add_instruction(cond_branch)
return cond_branch
def create_call(self, func, args, name=None):
call_inst = CallInstruction(func, args, self.__current_bb, name)
self.__add_instruction(call_inst)
return call_inst
def create_add(self, lhs, rhs, name=None):
folded_inst = self.const_fold_binary_op(lhs, rhs, '+')
if folded_inst is not None:
return folded_inst
add_inst = AddInstruction(lhs, rhs, self.__current_bb, name)
self.__add_instruction(add_inst)
return add_inst
def create_sub(self, lhs, rhs, name=None):
folded_inst = self.const_fold_binary_op(lhs, rhs, '-')
if folded_inst is not None:
return folded_inst
sub_inst = SubInstruction(lhs, rhs, self.__current_bb, name)
self.__add_instruction(sub_inst)
return sub_inst
def create_mul(self, lhs, rhs, name=None):
folded_inst = self.const_fold_binary_op(lhs, rhs, '*')
if folded_inst is not None:
return folded_inst
mul_inst = MulInstruction(lhs, rhs, self.__current_bb, name)
self.__add_instruction(mul_inst)
return mul_inst
def create_div(self, lhs, rhs, name=None):
folded_inst = self.const_fold_binary_op(lhs, rhs, '/')
if folded_inst is not None:
return folded_inst
div_inst = DivInstruction(lhs, rhs, self.__current_bb, name)
self.__add_instruction(div_inst)
return div_inst
def create_icmp(self, lhs, rhs, comparator, name=None):
        icmp_inst = ICmpInstruction(comparator, lhs, rhs, self.__current_bb, name)
self.__add_instruction(icmp_inst)
return icmp_inst
def create_select(self, cond, val_true, val_false, name=None):
select_inst = SelectInstruction(cond, val_true, val_false, self.__current_bb, name)
self.__add_instruction(select_inst)
return select_inst
def create_alloca(self, numEls=None, name=None):
alloca_inst = AllocaInstruction(numEls, self.__current_bb, name)
self.__add_instruction(alloca_inst)
return alloca_inst
def create_load(self, alloca):
load_inst = LoadInstruction(alloca, parent=self.__current_bb)
self.__add_instruction(load_inst)
return load_inst
def create_store(self, alloca, value):
store_inst = StoreInstruction(alloca, value, parent=self.__current_bb)
self.__add_instruction(store_inst)
return store_inst
<|fim▁hole|> folded_inst = self.const_fold_binary_op(op1, op2, '<<')
if folded_inst is not None:
return folded_inst
shl_inst = ShiftLeftInstruction(op1, op2, self.__current_bb, name)
self.__add_instruction(shl_inst)
return shl_inst
def create_lshr(self, op1, op2, name=None):
folded_inst = self.const_fold_binary_op(op1, op2, '>>')
if folded_inst is not None:
return folded_inst
lshr_inst = LogicalShiftRightInstruction(op1, op2, self.__current_bb, name)
self.__add_instruction(lshr_inst)
return lshr_inst
def create_ashr(self, op1, op2, name=None):
ashr_inst = ArithmeticShiftRightInstruction(op1, op2, self.__current_bb, name)
self.__add_instruction(ashr_inst)
return ashr_inst
def create_and(self, op1, op2, name=None):
folded_inst = self.const_fold_binary_op(op1, op2, '&')
if folded_inst is not None:
return folded_inst
and_inst = AndInstruction(op1, op2, self.__current_bb, name)
self.__add_instruction(and_inst)
return and_inst
def create_or(self, op1, op2, name=None):
folded_inst = self.const_fold_binary_op(op1, op2, '|')
if folded_inst is not None:
return folded_inst
or_inst = OrInstruction(op1, op2, self.__current_bb, name)
self.__add_instruction(or_inst)
return or_inst
def create_xor(self, op1, op2, name=None):
folded_inst = self.const_fold_binary_op(op1, op2, '^')
if folded_inst is not None:
return folded_inst
xor_inst = XorInstruction(op1, op2, self.__current_bb, name)
self.__add_instruction(xor_inst)
return xor_inst
def create_number(self, number):
number = Number(number)
return number
def create_string(self, string):
string_obj = String(string)
return string_obj
#def create_vector(self, baseTy, numElts, name=None):
# vecTy = VectorType(baseTy, numElts)
# alloca = self.create_alloca(vecTy, 1, None, name)
# vec = self.create_load(alloca)
# return vec<|fim▁end|> | def create_shl(self, op1, op2, name=None): |
<|file_name|>coupled_OLS_SED_funcs_test.C<|end_file_name|><|fim▁begin|>/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------
*
* Copyright (C) 2011 Gabriel Terejanu - [email protected]
*
* This is an application framework for solving the problem of
* predictive model selection of coupled models as presented in the
* following paper:
*
* Gabriel Terejanu, Todd Oliver, Chris Simmons (2011). Application of
* Predictive Model Selection to Coupled Models. In Proceedings of the World
* Congress on Engineering and Computer Science 2011 Vol II, WCECS 2011,
* pp. 927-932.
*
* The framework is built on top of statistical library QUESO
* (Quantification of Uncertainty for Estimation, Simulation and Optimization).
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*--------------------------------------------------------------------------
*
* coupled_OLS_SED_funcs_test.C
*
*--------------------------------------------------------------------------
*-------------------------------------------------------------------------- */
#include <cmath>
#include <map>
#include <string>
#include <iostream>
#include "uqDefines.h"
#include "uqGslVector.h"
#include "uqVectorSpace.h"
#include "uqGslMatrix.h"
#include "testsCommon.h"
#include "oscillatorLinearSpring.h"
#include "forcingSimpleExponentialDecay.h"
#include "modelForcingSimpleExponentialDecay.h"
uqFullEnvironmentClass* env;
//********************************************************************
// Definitions
//********************************************************************
bool test_OLS_SED_func();
bool test_OLS_SED_int_exact();
bool test_OLS_SED_int_energy();
//********************************************************************
// main - OLS - SED tests
//********************************************************************
int main(int argc, char* argv[])
{
MPI_Init(&argc,&argv);
env = new uqFullEnvironmentClass(MPI_COMM_WORLD,argv[1],"",NULL);
// tests OLS_SED_func
TST_MACRO( test_OLS_SED_func(),
"oscillatorLinearSpring_func",
"with forcing" );
// tests OLS_SED_int_exact
TST_MACRO( test_OLS_SED_int_exact(),
"oscillatorLinearSpring_int",
"exact solution with forcing" );
// Finalize environment
delete env;
MPI_Finalize();
// no error has been found
return 0;
}
//********************************************************************
// tests OLS_SED_func
//********************************************************************
bool test_OLS_SED_func()
{
// scenario
double scn_time = 1.0;
// params oscillator
double param_c = 2.0;
double param_k10 = param_c;
// params forcing
double param_F0 = 2.0;
double param_tau = 1.0;
// map inputs params (oscillator)
std::map<std::string, double> map_act_params;
map_act_params[ __OLS_PARAM_C ] = param_c;
map_act_params[ __OLS_PARAM_K10 ] = param_k10;
// forcing
map_act_params[ __SED_PARAM_F0 ] = param_F0;
map_act_params[ __SED_PARAM_TAU ] = param_tau;
// set coupling
map_act_params[ __SED_NAME ] = 1.0;
// outputs
double ic_y[2] = {0.5, 1.5};
double computed_f[2] = {0.0, 0.0};
double exact_f[2] =
{ ic_y[1],
param_F0*exp(-scn_time/param_tau) - param_c/__OSCILLATOR_MASS*(ic_y[0]+ic_y[1]) };
// get values
oscillatorLinearSpring_func( scn_time,
ic_y,
computed_f,
(void*)&map_act_params );
// compare the exact value with the computed one
return ( ( std::fabs( exact_f[0] - computed_f[0] ) > DOUBLE_TOL ) ||
( std::fabs( exact_f[1] - computed_f[1] ) > DOUBLE_TOL ) );
}
//********************************************************************
// tests OLS_SED_int_exact
//********************************************************************
bool test_OLS_SED_int_exact()
{
// scenario
double scn_time = 1.0;
// params oscillator
double param_c = 0.1;
double param_k10 = 0.0; // to have a linear ODE in velocity
// params forcing
double param_F0 = 2.0;
double param_tau = 1.0;
// map inputs
std::map<std::string, double> map_scn;
map_scn[ __OLS_SCN_TIME ] = scn_time;
// map params oscillator
std::map<std::string, double> map_act_params;
map_act_params[ __OLS_PARAM_C ] = param_c;
map_act_params[ __OLS_PARAM_K10 ] = param_k10;
// forcing
map_act_params[ __SED_PARAM_F0 ] = param_F0;
map_act_params[ __SED_PARAM_TAU ] = param_tau;
// set coupling
map_act_params[ __SED_NAME ] = 1.0;<|fim▁hole|> double time_to_rest;
double max_displacement;
double max_velocity;
// get final velocity
oscillatorLinearSpring_int( final_y0, final_y1,
time_to_rest,
max_displacement,
max_velocity,
false,
map_scn,
map_act_params );
double my_const = __OSCILLATOR_IC_X2 +
param_tau * param_F0 / (__OSCILLATOR_MASS - param_tau * param_c );
double pre_exp = param_tau * param_F0 / ( param_tau * param_c - __OSCILLATOR_MASS );
double exact_y1 = exp( -param_c/__OSCILLATOR_MASS*scn_time ) *
( pre_exp *
exp( (param_tau*param_c-__OSCILLATOR_MASS)/(__OSCILLATOR_MASS*param_tau)*scn_time )
+ my_const );
// std::cout << "final_y1 = " << final_y1 << std::endl;
// std::cout << "exact_y1 = " << exact_y1 << std::endl;
// compare the exact value with the computed one
return ( std::fabs( exact_y1 - final_y1 ) > DOUBLE_TOL ) ;
}<|fim▁end|> |
// outputs
double final_y0;
double final_y1; |
<|file_name|>utils.py<|end_file_name|><|fim▁begin|>import os, sys
import datetime
import logging
from glob import glob
import json
import numpy as np
import pandas
from skimage.morphology import binary_erosion
from nitime.timeseries import TimeSeries
from nitime.analysis import SpectralAnalyzer, FilterAnalyzer
import nibabel
import nipype.interfaces.spm as spm
from nipype.interfaces.base import CommandLine
import nipype.interfaces.fsl as fsl
from nipype.utils import filemanip
import nipype.interfaces.afni as afni
## deal with relative import for now
cwd = os.getcwd()
sys.path.insert(0, cwd)
import nipype_ext
########################
## naming structure used in scripts to make subdirectories
defaults = {
'rawdir': 'raw',
'func_glob': 'B*func4d.nii*',
'despiked_func_glob' : 'dsB*func4d.nii*',
'anat_glob' : 'brainmask.nii*',
'aparc_glob' : 'aparcaseg.nii*',
'aligned' : 'align4d_{0}.nii*',
'realign_ants':'ants_realign',
'realign_spm': 'spm_realign_slicetime',
'despike' : 'despike_',
'coreg' : 'coreg_masks',
'bandpass' : 'bandpass',
'model_fsl': 'model_fsl',
'wm_labels': [2,41, 77,78,79],
'vent_labels': [4,5,14,15,28,43,44,60,72,75,76],
'movement_names' : ['mc{}.1D'.format(x+1) for x in xrange(6)],
'noise_names' : ['wm.1D', 'csf.1D', 'global.1D']
}
def get_files(dir, globstr):
"""
uses glob to find dir/globstr
returns sorted list; number of files
"""
searchstr = os.path.join(dir, globstr)
files = glob(searchstr)
files.sort()
return files, len(files)
def make_datestr():
now = datetime.datetime.now()
return now.strftime('%Y_%m_%d_%H_%S')
def make_dir(base_dir, dirname='newdir'):
""" makes a new directory if it doesnt alread exist
returns full path
Parameters
----------
base_dir : str
the root directory
dirname : str (default pib_nifti)
new directory name
Returns
-------
newdir : str
full path of new directory
"""
newdir = os.path.join(base_dir,dirname)
if not os.path.isdir(base_dir):
raise IOError('ERROR: base dir %s DOES NOT EXIST'%(base_dir))
directory_exists = os.path.isdir(newdir)
if not directory_exists:
os.mkdir(newdir)
return newdir, directory_exists
def fsl_make4d(infiles, newfile):
"""a list of files is passed, a 4D volume will be created
in the same directory as the original files"""
if not hasattr(infiles, '__iter__'):
raise IOError('expected list,not %s'%(infiles))
startdir = os.getcwd()
pth, nme = os.path.split(infiles[0])
os.chdir(pth)
merge = fsl.Merge()
merge.inputs.in_files = infiles
merge.inputs.dimension = 't'
merge.inputs.merged_file = newfile
out = merge.run()
os.chdir(startdir)
if not out.runtime.returncode == 0:
print out.runtime.stderr
return None
else:
return out.outputs.merged_file
def fsl_split4d(infile, outdir, sid):
""" uses fsl to split 4d file into parts
based on sid, puts resulting files in outdir
"""
startdir = os.getcwd()
pth, nme = os.path.split(infile)
os.chdir(outdir)
im = fsl.Split()
im.inputs.in_file = infile
im.inputs.dimension = 't'
im.inputs.out_base_name = sid
im.inputs.output_type = 'NIFTI'
out = im.run()
os.chdir(startdir)
if not out.runtime.returncode == 0:
print out.runtime.stderr
return None
else:
# fsl split may include input file as an output
## bad globbing...
# remove it here
outfiles = out.outputs.out_files
outfiles = [x for x in outfiles if not x == im.inputs.in_file]
return outfiles
def get_slicetime(nslices):
"""
If TOTAL # SLICES = EVEN, then the excitation order when interleaved
is EVENS first, ODDS second.
If TOTAL # SLICES = ODD, then the excitation order when interleaved is
ODDS first, EVENS second.
Returns:
sliceorder: list
list containing the order of slice acquisition used for slicetime
correction
"""
if np.mod(nslices,2) == 0:
sliceorder = np.concatenate((np.arange(2,nslices+1,2),
np.arange(1,nslices+1,2)))
else:
sliceorder = np.concatenate((np.arange(1,nslices+1,2),
np.arange(2,nslices+1,2)))
# cast to a list for use with interface
return list(sliceorder)
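# Worked example (illustrative): get_slicetime(5) -> [1, 3, 5, 2, 4] (odd slices
# first, since the slice count is odd), while get_slicetime(6) -> [2, 4, 6, 1, 3, 5]
# (even slices first), matching the rule described in the docstring above.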
def get_slicetime_vars(infiles, TR=None):
"""
uses nibabel to get slicetime variables
Returns:
dict: dict
        nslices : number of slices<|fim▁hole|>
img = nibabel.load(infiles[0])
else:
img = nibabel.load(infiles)
hdr = img.get_header()
if TR is None:
raise RuntimeError('TR is not defined ')
shape = img.get_shape()
nslices = shape[2]
TA = TR - TR/nslices
sliceorder = get_slicetime(nslices)
return dict(nslices=nslices,
TA = TA,
TR = TR,
sliceorder = sliceorder)
def save_json(inobj, outfile):
''' save inobj to outfile using json'''
try:
json.dump(inobj, open(outfile,'w+'))
except:
raise IOError('Unable to save %s to %s (json)'%(inobj, outfile))
def load_json(infile):
''' use json to load objects in json file'''
try:
result = json.load(open(infile))
except:
raise IOError('Unable to load %s' %infile)
return result
def zip_files(files):
if not hasattr(files, '__iter__'):
files = [files]
for f in files:
base, ext = os.path.splitext(f)
if 'gz' in ext:
# file already gzipped
continue
cmd = CommandLine('gzip %s' % f)
cout = cmd.run()
if not cout.runtime.returncode == 0:
logging.error('Failed to zip %s'%(f))
def unzip_file(infile):
""" looks for gz at end of file,
unzips and returns unzipped filename"""
base, ext = os.path.splitext(infile)
if not ext == '.gz':
return infile
else:
if os.path.isfile(base):
return base
cmd = CommandLine('gunzip %s' % infile)
cout = cmd.run()
if not cout.runtime.returncode == 0:
print 'Failed to unzip %s'%(infile)
return None
else:
return base
def copy_file(infile, newdir):
""" copy infile to new directory
return full path of new file
"""
cl = CommandLine('cp %s %s'%(infile, newdir))
out = cl.run()
if not out.runtime.returncode == 0:
print 'failed to copy %s' % infile
print out.runtime.stderr
return None
else:
basenme = os.path.split(infile)[1]
newfile = os.path.join(newdir, basenme)
return newfile
def copy_files(infiles, newdir):
"""wraps copy file to run across multiple files
returns list"""
newfiles = []
for f in infiles:
newf = copy_file(f, newdir)
newfiles.append(newf)
return newfiles
def remove_files(files):
"""removes files """
if not hasattr(files, '__iter__'):
cl = CommandLine('rm %s'% files)
out = cl.run()
if not out.runtime.returncode == 0:
print 'failed to delete %s' % files
print out.runtime.stderr
return
for f in files:
cl = CommandLine('rm %s'% f)
out = cl.run()
if not out.runtime.returncode == 0:
print 'failed to delete %s' % f
print out.runtime.stderr
def afni_despike(in4d):
""" uses afni despike to despike a 4D dataset
saves as ds_<filename>"""
dspike = afni.Despike()
dspike.inputs.in_file = in4d
dspike.inputs.outputtype = 'NIFTI_GZ'
dspike.inputs.ignore_exception = True
outfile = filemanip.fname_presuffix(in4d, 'ds')
dspike.inputs.out_file = outfile
res = dspike.run()
return res.runtime.returncode, res
def spm_realign(infiles, matlab='matlab-spm8'):
""" Uses SPM to realign files"""
startdir = os.getcwd()
pth, _ = os.path.split(infiles[0])
os.chdir(pth)
rlgn = spm.Realign(matlab_cmd = matlab)
rlgn.inputs.in_files = infiles
rlgn.inputs.register_to_mean = True
out = rlgn.run()
os.chdir(startdir)
if not out.runtime.returncode == 0:
print out.runtime.stderr
return None, None, None
return out.outputs.mean_image, out.outputs.realigned_files,\
out.outputs.realignment_parameters
def spm_realign_unwarp(infiles, matlab = 'matlab-spm8'):
""" uses spm to run realign_unwarp
Returns
-------
mean_img = File; mean generated by unwarp/realign
realigned_files = Files; files unwarped and realigned
parameters = File; file holding the trans rot params
"""
startdir = os.getcwd()
pth, _ = os.path.split(infiles[0])
os.chdir(pth)
ru = nipype_ext.RealignUnwarp(matlab_cmd = matlab)
ru.inputs.in_files = infiles
ruout = ru.run()
os.chdir(startdir)
if not ruout.runtime.returncode == 0:
print ruout.runtime.stderr
return None, None, None
return ruout.outputs.mean_image, ruout.outputs.realigned_files,\
ruout.outputs.realignment_parameters
def make_mean(niftilist, outfile):
"""given a list of nifti files
generates a mean image"""
if not hasattr(niftilist, '__iter__'):
raise IOError('%s is not a list of valid nifti files,'\
' cannot make mean'%niftilist)
n_images = len(niftilist)
affine = nibabel.load(niftilist[0]).get_affine()
shape = nibabel.load(niftilist[0]).get_shape()
newdat = np.zeros(shape)
for item in niftilist:
newdat += nibabel.load(item).get_data().copy()
newdat = newdat / n_images
newdat = np.nan_to_num(newdat)
newimg = nibabel.Nifti1Image(newdat, affine)
newimg.to_filename(outfile)
return outfile
def mean_from4d(in4d, outfile):
""" given a 4D files, calc mean across voxels (time)
and write new 3D file to outfile with same mapping
as in4d"""
##FIXME consider unzipping files first if this is slow
## Fast memmap doesnt work on zipped files very well
affine = nibabel.load(in4d).get_affine()
dat = nibabel.load(in4d).get_data()
mean = dat.mean(axis=-1)
newimg = nibabel.Nifti1Image(mean, affine)
try:
newimg.to_filename(outfile)
return outfile
except:
raise IOError('Unable to write {0}'.format(outfile))
def simple_mask(dataf, maskf, outfile, thr=0):
""" sets values in data to zero if they are zero in mask"""
img = nibabel.load(dataf)
dat = img.get_data()
mask = nibabel.load(maskf).get_data()
if not dat.shape == mask.shape:
raise IOError('shape mismatch {0}, {1}'.format(dataf, maskf))
dat[mask <=thr] = 0
newimg = nibabel.Nifti1Image(dat, img.get_affine())
newimg.to_filename(outfile)
return outfile
def aparc_mask(aparc, labels, outfile = 'bin_labelmask.nii.gz'):
""" takes coreg'd aparc and makes a mask based on label values
Parameters
==========
aparc : filename
file containing label image (ints)
labels : tuple
tuple of label values (ints)
"""
pth, _ = os.path.split(outfile)
img = nibabel.load(aparc)
mask = np.zeros(img.get_shape())
label_dat = img.get_data()
for label in labels:
mask[label_dat == label] = 1
masked_img = nibabel.Nifti1Image(mask, img.get_affine())
outf = os.path.join(pth, outfile)
masked_img.to_filename(outf)
return outf
def erode(infile):
""" use skimage.morphology to quickly erode binary mask"""
img = nibabel.load(infile)
dat = img.get_data().squeeze()
## make kernel
tmp = np.diag([0,1,0])
mid = np.zeros((3,3))
mid[1,:] = 1
mid[:,1] = 1
kernel = np.hstack((tmp, mid, tmp))
kernel.shape = (3,3,3)
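    # kernel is a 3x3x3 cross: the centre voxel plus its 6 face-adjacent neighbours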
## erode with kernel
eroded = binary_erosion(dat, kernel)
eroded = eroded.astype(int)
newfile = filemanip.fname_presuffix(infile, 'e')
newimg = nibabel.Nifti1Image(eroded, img.get_affine())
newimg.to_filename(newfile)
return newfile
def get_seedname(seedfile):
_, nme, _ = filemanip.split_filename(seedfile)
return nme
def extract_seed_ts(data, seeds):
""" check shape match of data and seed if same assume registration
extract mean of data in seed > 0"""
data_dat = nibabel.load(data).get_data()
meants = []
for seed in seeds:
seed_dat = nibabel.load(seed).get_data().squeeze()
assert seed_dat.shape == data_dat.shape[:3]
seed_dat[data_dat[:,:,:,0].squeeze() <=0] = 0
tmp = data_dat[seed_dat > 0,:]
meants.append(tmp.mean(0))
return meants
def mask4d_with3d(datf, maskf, labels):
""" given a 4D data file, and a mask file
for each label in labels, pull the mean ts
and save to an array that is nlabels X ntimepoints"""
dat = nibabel.load(datf).get_data()
mask = nibabel.load(maskf).get_data()
if not dat.shape[:3] == mask.shape:
raise ValueError('Shape of dat does not match mask')
result = np.empty((len(labels), dat.shape[-1]))
for val, label in enumerate(labels):
region = dat[mask == label, :]
result[val, :] = region.mean(axis=0)
return result
def bandpass_data():
""" filters for 4D images and timeseries in txt files
Uses afni 3dBandpass
"""
pass
def nitime_bandpass(data, tr, ub=0.15, lb=0.0083):
""" use nittime to bandpass filter regressors
format of data shoud be samples X timepoints"""
ts = TimeSeries(data, sampling_interval=tr)
filtered_ts = FilterAnalyzer(ts, ub=ub, lb=lb)
return filtered_ts.data
def write_filtered(data, outfile):
data.to_file(outfile)
def bandpass_regressor():
""" filters motion params and timeseries from csf and white matter
(also global signal when relevant)
Use afni 1dBandpass, motion values in a 1d file"""
pass
def zero_pad_movement(dataframe):
#insert row of zeros into a dataframe
rows, cols = dataframe.shape
newdat = np.zeros((rows+1, cols))
newdat[1:,:] = dataframe
return pandas.DataFrame(newdat, columns = dataframe.columns)
def smooth_to_fwhm(in4d, outfile = None, fwhm = '8'):
""" 3dBlurToFWHM -input res4d.nii.gz -FWHM 8
use 3dAFNItoNIFTI to convert"""
if outfile is None:
outfile = filemanip.fname_presuffix(in4d,
prefix = 'blur_{}'.format(fwhm))
cmd = '3dBlurToFWHM'
fullcmd = ' '.join([cmd,
'-prefix',
outfile,
'-input',
in4d,
'-FWHM',
'{}'.format(fwhm)] )
res = CommandLine(fullcmd).run()
if res.runtime.returncode == 0:
return fullcmd, outfile
print res.runtime.stderr
return None
def fsl_bandpass(infile, outfile, tr, lowf=0.0083, highf=0.15):
""" use fslmaths to bandpass filter a 4d file"""
startdir = os.getcwd()
pth, nme = os.path.split(infile)
os.chdir(pth)
low_freq = 1 / lowf / 2 / tr
high_freq = 1 / highf / 2 / tr
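    # Note: fslmaths -bptf expects highpass/lowpass *sigmas in volumes*,
    # roughly 1 / (2 * cutoff_hz * TR); despite the names, low_freq and
    # high_freq above hold those sigma values rather than frequencies.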
im = fsl.ImageMaths()
im.inputs.in_file = infile
im.inputs.out_file = outfile
op_str = ' '.join(['-bptf',str(low_freq), str(high_freq)])
im.inputs.op_string = op_str
im.inputs.suffix = 'bpfilter_l%2.2f_h%2.2f'%(low_freq, high_freq)
out = im.run()
os.chdir(startdir)
if not out.runtime.returncode == 0:
print out.runtime.stderr
return None
else:
return out.outputs.out_file
def spm_slicetime(infiles, matlab_cmd='matlab-spm8',stdict = None):
"""
runs slice timing
returns
timecorrected_files
"""
startdir = os.getcwd()
pth, _ = os.path.split(infiles[0])
os.chdir(pth)
    if stdict is None:
stdict = get_slicetime_vars(infiles)
sliceorder = stdict['sliceorder']
st = spm.SliceTiming(matlab_cmd = matlab_cmd)
st.inputs.in_files = infiles
st.inputs.ref_slice = np.round(stdict['nslices'] / 2.0).astype(int)
st.inputs.slice_order = sliceorder
st.inputs.time_acquisition = stdict['TA']
st.inputs.time_repetition = stdict['TR']
st.inputs.num_slices = stdict['nslices']
out = st.run()
os.chdir(startdir)
if not out.runtime.returncode == 0:
print out.runtime.stderr
return None
else:
return out.outputs.timecorrected_files
def spm_coregister(moving, target, apply_to_files=None,
matlab_cmd='matlab-spm8'):
"""
runs coregistration for moving to target
"""
startdir = os.getcwd()
pth, _ = os.path.split(moving)
os.chdir(pth)
cr = spm.Coregister(matlab_cmd = matlab_cmd)
cr.inputs.source = moving
cr.inputs.target = target
if apply_to_files is not None:
cr.inputs.apply_to_files = apply_to_files
out = cr.run()
os.chdir(startdir)
if not out.runtime.returncode == 0:
print out.runtime.stderr
return None, None
else:
return out.outputs.coregistered_source,\
out.outputs.coregistered_files
def update_fsf(fsf, fsf_dict):
""" update fsf with subject specific data
Parameters
----------
fsf : filename
filename of default fsf file with default parameters
to use for your model
fsf_dict : dict
dictionary holding data with the following keys:
nuisance_dir
nuisance_outdir
input_data
TR
nTR
Returns
-------
tmp5 : string
string to write to new fsf file
"""
original = open(fsf).read()
tmp1 = original.replace('nuisance_dir',
fsf_dict['nuisance_dir'])
tmp2 = tmp1.replace('nuisance_model_outputdir',
fsf_dict['nuisance_outdir'])
tmp3 = tmp2.replace('nuisance_model_input_data',
fsf_dict['input_data'])
tmp4 = tmp3.replace('nuisance_model_TR',
fsf_dict['TR'])
tmp5 = tmp4.replace('nuisance_model_numTRs',
fsf_dict['nTR'])
return tmp5
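# Minimal usage sketch for update_fsf/write_fsf (paths and values below are
# hypothetical, purely for illustration):
#   fsf_dict = {'nuisance_dir': '/data/sub01/nuisance',
#               'nuisance_outdir': '/data/sub01/nuisance/model',
#               'input_data': '/data/sub01/func/rest_bandpassed.nii.gz',
#               'TR': '2.2', 'nTR': '240'}  # values must be strings for replace()
#   fsf_str = update_fsf('/templates/nuisance_template.fsf', fsf_dict)
#   write_fsf(fsf_str, '/data/sub01/nuisance/sub01_nuisance.fsf')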
def write_fsf(fsf_string, outfile):
""" writes an updated fsf string (see update_fsf)
to outfile"""
with open(outfile, 'w+') as fid:
fid.write(fsf_string)
def run_feat_model(fsf_file):
""" runs FSL's feat_model which uses the fsf file to generate
files necessary to run film_gls to fit design matrix to timeseries"""
    # remove the .fsf extension (str.strip would strip characters, not the suffix)
    clean_fsf = fsf_file[:-4] if fsf_file.endswith('.fsf') else fsf_file
cmd = 'feat_model %s'%(clean_fsf)
out = CommandLine(cmd).run()
if not out.runtime.returncode == 0:
return None, out.runtime.stderr
mat = fsf_file.replace('.fsf', '.mat')
return mat, cmd
def run_film(data, design, results_dir):
minval = nibabel.load(data).get_data().min()
if minval < 0:
minval=0
film = fsl.FILMGLS()
film.inputs.in_file = data
film.inputs.design_file = design
film.inputs.threshold = minval
film.inputs.results_dir = results_dir
film.inputs.smooth_autocorr = True
film.inputs.mask_size = 5
res = film.run()
return res<|fim▁end|> | TA : acquisition Time
TR: repetition Time
sliceorder : array with slice order to run slicetime correction
""" |
<|file_name|>config_flow.py<|end_file_name|><|fim▁begin|>"""Config flow for ReCollect Waste integration."""
from __future__ import annotations
from typing import Any
from aiorecollect.client import Client
from aiorecollect.errors import RecollectError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_FRIENDLY_NAME
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import aiohttp_client
from .const import CONF_PLACE_ID, CONF_SERVICE_ID, DOMAIN, LOGGER
DATA_SCHEMA = vol.Schema(
{vol.Required(CONF_PLACE_ID): str, vol.Required(CONF_SERVICE_ID): str}
)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for ReCollect Waste."""
VERSION = 1
@staticmethod
@callback
def async_get_options_flow(
config_entry: config_entries.ConfigEntry,
) -> config_entries.OptionsFlow:
"""Define the config flow to handle options."""
return RecollectWasteOptionsFlowHandler(config_entry)
async def async_step_import(
self, import_config: dict[str, Any] | None = None
) -> FlowResult:
"""Handle configuration via YAML import."""
return await self.async_step_user(import_config)
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle configuration via the UI."""
if user_input is None:
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors={}
)
unique_id = f"{user_input[CONF_PLACE_ID]}, {user_input[CONF_SERVICE_ID]}"
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
session = aiohttp_client.async_get_clientsession(self.hass)
client = Client(
user_input[CONF_PLACE_ID], user_input[CONF_SERVICE_ID], session=session
)
try:
await client.async_get_next_pickup_event()
except RecollectError as err:
LOGGER.error("Error during setup of integration: %s", err)
return self.async_show_form(
step_id="user",<|fim▁hole|> data_schema=DATA_SCHEMA,
errors={"base": "invalid_place_or_service_id"},
)
return self.async_create_entry(
title=unique_id,
data={
CONF_PLACE_ID: user_input[CONF_PLACE_ID],
CONF_SERVICE_ID: user_input[CONF_SERVICE_ID],
},
)
class RecollectWasteOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a Recollect Waste options flow."""
def __init__(self, entry: config_entries.ConfigEntry) -> None:
"""Initialize."""
self._entry = entry
async def async_step_init(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
CONF_FRIENDLY_NAME,
default=self._entry.options.get(CONF_FRIENDLY_NAME),
): bool
}
),
)<|fim▁end|> | |
<|file_name|>_context.py<|end_file_name|><|fim▁begin|>from __future__ import annotations
from collections import defaultdict
from collections.abc import Generator, Iterable, Mapping, MutableMapping
from contextlib import contextmanager
import logging
import re
import textwrap
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, NamedTuple
from markdown_it.rules_block.html_block import HTML_SEQUENCES
from mdformat import codepoints
from mdformat._compat import Literal
from mdformat._conf import DEFAULT_OPTS
from mdformat.renderer._util import (
RE_CHAR_REFERENCE,
decimalify_leading,
decimalify_trailing,
escape_asterisk_emphasis,
escape_underscore_emphasis,
get_list_marker_type,
is_tight_list,
is_tight_list_item,
longest_consecutive_sequence,
maybe_add_link_brackets,
)
from mdformat.renderer.typing import Postprocess, Render
if TYPE_CHECKING:
from mdformat.renderer import RenderTreeNode
LOGGER = logging.getLogger(__name__)
# A marker used to point a location where word wrap is allowed
# to occur.
WRAP_POINT = "\x00"
# A marker used to indicate location of a character that should be preserved
# during word wrap. Should be converted to the actual character after wrap.
PRESERVE_CHAR = "\x00"
def make_render_children(separator: str) -> Render:
def render_children(
node: RenderTreeNode,
context: RenderContext,
) -> str:
return separator.join(child.render(context) for child in node.children)
return render_children
def hr(node: RenderTreeNode, context: RenderContext) -> str:
thematic_break_width = 70
return "_" * thematic_break_width
def code_inline(node: RenderTreeNode, context: RenderContext) -> str:
code = node.content
all_chars_are_whitespace = not code.strip()
longest_backtick_seq = longest_consecutive_sequence(code, "`")
if longest_backtick_seq:
separator = "`" * (longest_backtick_seq + 1)
return f"{separator} {code} {separator}"
if code.startswith(" ") and code.endswith(" ") and not all_chars_are_whitespace:
return f"` {code} `"
return f"`{code}`"
def html_block(node: RenderTreeNode, context: RenderContext) -> str:
content = node.content.rstrip("\n")
# Need to strip leading spaces because we do so for regular Markdown too.
# Without the stripping the raw HTML and Markdown get unaligned and
# semantic may change.
content = content.lstrip()
return content
def html_inline(node: RenderTreeNode, context: RenderContext) -> str:
return node.content
def _in_block(block_name: str, node: RenderTreeNode) -> bool:
while node.parent:
if node.parent.type == block_name:
return True
node = node.parent
return False
def hardbreak(node: RenderTreeNode, context: RenderContext) -> str:
if _in_block("heading", node):
return "<br /> "
return "\\" + "\n"
def softbreak(node: RenderTreeNode, context: RenderContext) -> str:
if context.do_wrap and _in_block("paragraph", node):
return WRAP_POINT
return "\n"
def text(node: RenderTreeNode, context: RenderContext) -> str:
"""Process a text token.
Text should always be a child of an inline token. An inline token
should always be enclosed by a heading or a paragraph.
"""
text = node.content
# Escape backslash to prevent it from making unintended escapes.
# This escape has to be first, else we start multiplying backslashes.
text = text.replace("\\", "\\\\")
text = escape_asterisk_emphasis(text) # Escape emphasis/strong marker.
text = escape_underscore_emphasis(text) # Escape emphasis/strong marker.
text = text.replace("[", "\\[") # Escape link label enclosure
text = text.replace("]", "\\]") # Escape link label enclosure
text = text.replace("<", "\\<") # Escape URI enclosure
text = text.replace("`", "\\`") # Escape code span marker
# Escape "&" if it starts a sequence that can be interpreted as
# a character reference.
text = RE_CHAR_REFERENCE.sub(r"\\\g<0>", text)
# The parser can give us consecutive newlines which can break
# the markdown structure. Replace two or more consecutive newlines
# with newline character's decimal reference.
text = text.replace("\n\n", " ")
# If the last character is a "!" and the token next up is a link, we
# have to escape the "!" or else the link will be interpreted as image.
next_sibling = node.next_sibling
if text.endswith("!") and next_sibling and next_sibling.type == "link":
text = text[:-1] + "\\!"
if context.do_wrap and _in_block("paragraph", node):
text = re.sub(r"\s+", WRAP_POINT, text)
return text
def fence(node: RenderTreeNode, context: RenderContext) -> str:
info_str = node.info.strip()
lang = info_str.split(maxsplit=1)[0] if info_str else ""
code_block = node.content
# Info strings of backtick code fences cannot contain backticks.
# If that is the case, we make a tilde code fence instead.
fence_char = "~" if "`" in info_str else "`"
# Format the code block using enabled codeformatter funcs
if lang in context.options.get("codeformatters", {}):
fmt_func = context.options["codeformatters"][lang]
try:
code_block = fmt_func(code_block, info_str)
except Exception:
# Swallow exceptions so that formatter errors (e.g. due to
# invalid code) do not crash mdformat.
assert node.map is not None, "A fence token must have `map` attribute set"
filename = context.options.get("mdformat", {}).get("filename", "")
warn_msg = (
f"Failed formatting content of a {lang} code block "
f"(line {node.map[0] + 1} before formatting)"
)
if filename:
warn_msg += f". Filename: {filename}"
LOGGER.warning(warn_msg)
# The code block must not include as long or longer sequence of `fence_char`s
# as the fence string itself
fence_len = max(3, longest_consecutive_sequence(code_block, fence_char) + 1)
fence_str = fence_char * fence_len
return f"{fence_str}{info_str}\n{code_block}{fence_str}"
def code_block(node: RenderTreeNode, context: RenderContext) -> str:
return fence(node, context)
def image(node: RenderTreeNode, context: RenderContext) -> str:
description = _render_inline_as_text(node, context)
if context.do_wrap:
# Prevent line breaks
description = description.replace(WRAP_POINT, " ")
ref_label = node.meta.get("label")
if ref_label:
context.env["used_refs"].add(ref_label)
ref_label_repr = ref_label.lower()
if description.lower() == ref_label_repr:
return f"![{description}]"
return f"![{description}][{ref_label_repr}]"
uri = node.attrs["src"]
assert isinstance(uri, str)
uri = maybe_add_link_brackets(uri)
title = node.attrs.get("title")
if title is not None:
        return f'![{description}]({uri} "{title}")'
    return f"![{description}]({uri})"
def _render_inline_as_text(node: RenderTreeNode, context: RenderContext) -> str:
"""Special kludge for image `alt` attributes to conform CommonMark spec.
Don't try to use it! Spec requires to show `alt` content with
stripped markup, instead of simple escaping.
"""
def text_renderer(node: RenderTreeNode, context: RenderContext) -> str:
return node.content
def image_renderer(node: RenderTreeNode, context: RenderContext) -> str:
return _render_inline_as_text(node, context)
inline_renderers: Mapping[str, Render] = defaultdict(
lambda: make_render_children(""),
{
"text": text_renderer,
"image": image_renderer,
"link": link,
"softbreak": softbreak,
},
)
inline_context = RenderContext(
inline_renderers, context.postprocessors, context.options, context.env
)
return make_render_children("")(node, inline_context)
def link(node: RenderTreeNode, context: RenderContext) -> str:
if node.info == "auto":
autolink_url = node.attrs["href"]
assert isinstance(autolink_url, str)
# The parser adds a "mailto:" prefix to autolink email href. We remove the
# prefix if it wasn't there in the source.
if autolink_url.startswith("mailto:") and not node.children[
0
].content.startswith("mailto:"):
autolink_url = autolink_url[7:]
return "<" + autolink_url + ">"
text = "".join(child.render(context) for child in node.children)
if context.do_wrap:
# Prevent line breaks
text = text.replace(WRAP_POINT, " ")
ref_label = node.meta.get("label")
if ref_label:
context.env["used_refs"].add(ref_label)
ref_label_repr = ref_label.lower()
if text.lower() == ref_label_repr:
return f"[{text}]"
return f"[{text}][{ref_label_repr}]"
uri = node.attrs["href"]
assert isinstance(uri, str)
uri = maybe_add_link_brackets(uri)
title = node.attrs.get("title")
if title is None:
return f"[{text}]({uri})"
assert isinstance(title, str)
title = title.replace('"', '\\"')
return f'[{text}]({uri} "{title}")'
def em(node: RenderTreeNode, context: RenderContext) -> str:
text = make_render_children(separator="")(node, context)
indicator = node.markup
return indicator + text + indicator
def strong(node: RenderTreeNode, context: RenderContext) -> str:
text = make_render_children(separator="")(node, context)
indicator = node.markup
return indicator + text + indicator
def heading(node: RenderTreeNode, context: RenderContext) -> str:
text = make_render_children(separator="")(node, context)
if node.markup == "=":
prefix = "# "
elif node.markup == "-":
prefix = "## "
else: # ATX heading
prefix = node.markup + " "
# There can be newlines in setext headers, but we make an ATX
# header always. Convert newlines to spaces.
text = text.replace("\n", " ")
# If the text ends in a sequence of hashes (#), the hashes will be
# interpreted as an optional closing sequence of the heading, and
# will not be rendered. Escape a line ending hash to prevent this.
if text.endswith("#"):
text = text[:-1] + "\\#"
return prefix + text
def blockquote(node: RenderTreeNode, context: RenderContext) -> str:
marker = "> "
with context.indented(len(marker)):
text = make_render_children(separator="\n\n")(node, context)
lines = text.splitlines()
if not lines:
return ">"
quoted_lines = (f"{marker}{line}" if line else ">" for line in lines)
quoted_str = "\n".join(quoted_lines)
return quoted_str
def _wrap(text: str, *, width: int | Literal["no"]) -> str:
"""Wrap text at locations pointed by `WRAP_POINT`s.
Converts `WRAP_POINT`s to either a space or newline character, thus
wrapping the text. Already existing whitespace will be preserved as
is.
"""
text, replacements = _prepare_wrap(text)
if width == "no":
return _recover_preserve_chars(text, replacements)
wrapper = textwrap.TextWrapper(
break_long_words=False,
break_on_hyphens=False,
width=width,
expand_tabs=False,
replace_whitespace=False,
)
wrapped = wrapper.fill(text)
wrapped = _recover_preserve_chars(wrapped, replacements)
return " " + wrapped if text.startswith(" ") else wrapped
def _prepare_wrap(text: str) -> tuple[str, str]:
"""Prepare text for wrap.
Convert `WRAP_POINT`s to spaces. Convert whitespace to
`PRESERVE_CHAR`s. Return a tuple with the prepared string, and
another string consisting of replacement characters for
`PRESERVE_CHAR`s.
"""
result = ""
replacements = ""
for c in text:
if c == WRAP_POINT:
if not result or result[-1] != " ":
result += " "
elif c in codepoints.UNICODE_WHITESPACE:
result += PRESERVE_CHAR
replacements += c
else:
result += c
return result, replacements
def _recover_preserve_chars(text: str, replacements: str) -> str:
replacement_iterator = iter(replacements)
return "".join(
next(replacement_iterator) if c == PRESERVE_CHAR else c for c in text
)
def paragraph(node: RenderTreeNode, context: RenderContext) -> str: # noqa: C901
inline_node = node.children[0]
text = inline_node.render(context)
if context.do_wrap:
wrap_mode = context.options["mdformat"]["wrap"]
if isinstance(wrap_mode, int):
wrap_mode -= context.env["indent_width"]
wrap_mode = max(1, wrap_mode)
text = _wrap(text, width=wrap_mode)
# A paragraph can start or end in whitespace e.g. if the whitespace was
# in decimal representation form. We need to re-decimalify it, one reason being
# to enable "empty" paragraphs with whitespace only.
text = decimalify_leading(codepoints.UNICODE_WHITESPACE, text)
text = decimalify_trailing(codepoints.UNICODE_WHITESPACE, text)
lines = text.split("\n")
for i in range(len(lines)):
# Strip whitespace to prevent issues like a line starting tab that is
# interpreted as start of a code block.
lines[i] = lines[i].strip()
# If a line looks like an ATX heading, escape the first hash.
if re.match(r"#{1,6}( |\t|$)", lines[i]):
lines[i] = f"\\{lines[i]}"
# Make sure a paragraph line does not start with ">"
# (otherwise it will be interpreted as a block quote).
if lines[i].startswith(">"):
lines[i] = f"\\{lines[i]}"
# Make sure a paragraph line does not start with "*", "-" or "+"
# followed by a space, tab, or end of line.
# (otherwise it will be interpreted as list item).
if re.match(r"[-*+]( |\t|$)", lines[i]):
lines[i] = f"\\{lines[i]}"
# If a line starts with a number followed by "." or ")" followed by
# a space, tab or end of line, escape the "." or ")" or it will be
# interpreted as ordered list item.
if re.match(r"[0-9]+\)( |\t|$)", lines[i]):
lines[i] = lines[i].replace(")", "\\)", 1)
if re.match(r"[0-9]+\.( |\t|$)", lines[i]):
lines[i] = lines[i].replace(".", "\\.", 1)
# Consecutive "-", "*" or "_" sequences can be interpreted as thematic
# break. Escape them.
space_removed = lines[i].replace(" ", "").replace("\t", "")
if len(space_removed) >= 3:
if all(c == "*" for c in space_removed):
lines[i] = lines[i].replace("*", "\\*", 1) # pragma: no cover
elif all(c == "-" for c in space_removed):
lines[i] = lines[i].replace("-", "\\-", 1)
elif all(c == "_" for c in space_removed):
lines[i] = lines[i].replace("_", "\\_", 1) # pragma: no cover
# A stripped line where all characters are "=" or "-" will be
# interpreted as a setext heading. Escape.
stripped = lines[i].strip(" \t")
if all(c == "-" for c in stripped):
lines[i] = lines[i].replace("-", "\\-", 1)
elif all(c == "=" for c in stripped):
lines[i] = lines[i].replace("=", "\\=", 1)
# Check if the line could be interpreted as an HTML block.
# If yes, prefix it with 4 spaces to prevent this.
for html_seq_tuple in HTML_SEQUENCES:
can_break_paragraph = html_seq_tuple[2]
opening_re = html_seq_tuple[0]<|fim▁hole|>
text = "\n".join(lines)
return text
def list_item(node: RenderTreeNode, context: RenderContext) -> str:
"""Return one list item as string.
This returns just the content. List item markers and indentation are
added in `bullet_list` and `ordered_list` renderers.
"""
block_separator = "\n" if is_tight_list_item(node) else "\n\n"
text = make_render_children(block_separator)(node, context)
if not text.strip():
return ""
return text
def bullet_list(node: RenderTreeNode, context: RenderContext) -> str:
marker_type = get_list_marker_type(node)
first_line_indent = " "
indent = " " * len(marker_type + first_line_indent)
block_separator = "\n" if is_tight_list(node) else "\n\n"
with context.indented(len(indent)):
text = ""
for child_idx, child in enumerate(node.children):
list_item = child.render(context)
formatted_lines = []
line_iterator = iter(list_item.split("\n"))
first_line = next(line_iterator)
formatted_lines.append(
f"{marker_type}{first_line_indent}{first_line}"
if first_line
else marker_type
)
for line in line_iterator:
formatted_lines.append(f"{indent}{line}" if line else "")
text += "\n".join(formatted_lines)
if child_idx != len(node.children) - 1:
text += block_separator
return text
def ordered_list(node: RenderTreeNode, context: RenderContext) -> str:
consecutive_numbering = context.options.get("mdformat", {}).get(
"number", DEFAULT_OPTS["number"]
)
marker_type = get_list_marker_type(node)
first_line_indent = " "
block_separator = "\n" if is_tight_list(node) else "\n\n"
list_len = len(node.children)
starting_number = node.attrs.get("start")
if starting_number is None:
starting_number = 1
assert isinstance(starting_number, int)
if consecutive_numbering:
indent_width = len(
f"{list_len + starting_number - 1}{marker_type}{first_line_indent}"
)
else:
indent_width = len(f"{starting_number}{marker_type}{first_line_indent}")
text = ""
with context.indented(indent_width):
for list_item_index, list_item in enumerate(node.children):
list_item_text = list_item.render(context)
formatted_lines = []
line_iterator = iter(list_item_text.split("\n"))
first_line = next(line_iterator)
if consecutive_numbering:
# Prefix first line of the list item with consecutive numbering,
# padded with zeros to make all markers of even length.
# E.g.
# 002. This is the first list item
# 003. Second item
# ...
# 112. Last item
number = starting_number + list_item_index
pad = len(str(list_len + starting_number - 1))
number_str = str(number).rjust(pad, "0")
formatted_lines.append(
f"{number_str}{marker_type}{first_line_indent}{first_line}"
if first_line
else f"{number_str}{marker_type}"
)
else:
# Prefix first line of first item with the starting number of the
# list. Prefix following list items with the number one
# prefixed by zeros to make the list item marker of even length
# with the first one.
# E.g.
# 5321. This is the first list item
# 0001. Second item
# 0001. Third item
first_item_marker = f"{starting_number}{marker_type}"
other_item_marker = (
"0" * (len(str(starting_number)) - 1) + "1" + marker_type
)
if list_item_index == 0:
formatted_lines.append(
f"{first_item_marker}{first_line_indent}{first_line}"
if first_line
else first_item_marker
)
else:
formatted_lines.append(
f"{other_item_marker}{first_line_indent}{first_line}"
if first_line
else other_item_marker
)
for line in line_iterator:
formatted_lines.append(" " * indent_width + line if line else "")
text += "\n".join(formatted_lines)
if list_item_index != len(node.children) - 1:
text += block_separator
return text
DEFAULT_RENDERERS: Mapping[str, Render] = MappingProxyType(
{
"inline": make_render_children(""),
"root": make_render_children("\n\n"),
"hr": hr,
"code_inline": code_inline,
"html_block": html_block,
"html_inline": html_inline,
"hardbreak": hardbreak,
"softbreak": softbreak,
"text": text,
"fence": fence,
"code_block": code_block,
"link": link,
"image": image,
"em": em,
"strong": strong,
"heading": heading,
"blockquote": blockquote,
"paragraph": paragraph,
"bullet_list": bullet_list,
"ordered_list": ordered_list,
"list_item": list_item,
}
)
class RenderContext(NamedTuple):
"""A collection of data that is passed as input to `Render` and
`Postprocess` functions."""
renderers: Mapping[str, Render]
postprocessors: Mapping[str, Iterable[Postprocess]]
options: Mapping[str, Any]
env: MutableMapping
@contextmanager
def indented(self, width: int) -> Generator[None, None, None]:
self.env["indent_width"] += width
try:
yield
finally:
self.env["indent_width"] -= width
@property
def do_wrap(self) -> bool:
wrap_mode = self.options.get("mdformat", {}).get("wrap", DEFAULT_OPTS["wrap"])
return isinstance(wrap_mode, int) or wrap_mode == "no"
def with_default_renderer_for(self, *syntax_names: str) -> RenderContext:
renderers = dict(self.renderers)
for syntax in syntax_names:
if syntax in DEFAULT_RENDERERS:
renderers[syntax] = DEFAULT_RENDERERS[syntax]
else:
renderers.pop(syntax, None)
return RenderContext(
MappingProxyType(renderers), self.postprocessors, self.options, self.env
)<|fim▁end|> | if can_break_paragraph and opening_re.search(lines[i]):
lines[i] = f" {lines[i]}"
break |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>var Chartist = require('chartist');
module.exports = makePluginInstance;
makePluginInstance.calculateScaleFactor = calculateScaleFactor;
makePluginInstance.scaleValue = scaleValue;
function makePluginInstance(userOptions) {
var defaultOptions = {
dot: {
min: 8,
max: 10,
unit: 'px'
},
line: {
min: 2,
max: 4,
unit: 'px'
},
svgWidth: {
min: 360,
max: 1000
}
};
var options = Chartist.extend({}, defaultOptions, userOptions);
return function scaleLinesAndDotsInstance(chart) {
var actualSvgWidth;
chart.on('draw', function(data) {
if (data.type === 'point') {
setStrokeWidth(data, options.dot, options.svgWidth);<|fim▁hole|> setStrokeWidth(data, options.line, options.svgWidth);
}
});
/**
* Set stroke-width of the element of a 'data' object, based on chart width.
*
* @param {Object} data - Object passed to 'draw' event listener
* @param {Object} widthRange - Specifies min/max stroke-width and unit.
* @param {Object} thresholds - Specifies chart width to base scaling on.
*/
function setStrokeWidth(data, widthRange, thresholds) {
var scaleFactor = calculateScaleFactor(thresholds.min, thresholds.max, getActualSvgWidth(data));
var strokeWidth = scaleValue(widthRange.min, widthRange.max, scaleFactor);
data.element.attr({
style: 'stroke-width: ' + strokeWidth + widthRange.unit
});
}
/**
* @param {Object} data - Object passed to 'draw' event listener
*/
function getActualSvgWidth(data) {
return data.element.root().width();
}
};
}
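// Example wiring (chart selector, data and option values are assumed here
// for illustration only):
//   new Chartist.Line('.ct-chart', data, {
//     plugins: [makePluginInstance({ dot: { min: 6, max: 12, unit: 'px' } })]
//   });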
function calculateScaleFactor(min, max, value) {
if (max <= min) {
throw new Error('max must be > min');
}
var delta = max - min;
var scaleFactor = (value - min) / delta;
scaleFactor = Math.min(scaleFactor, 1);
scaleFactor = Math.max(scaleFactor, 0);
return scaleFactor;
}
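// For example, with svgWidth thresholds {min: 360, max: 1000} (values assumed):
// calculateScaleFactor(360, 1000, 680) === 0.5 and scaleValue(2, 4, 0.5) === 3,
// so a 680px-wide chart would get a 3px stroke.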
function scaleValue(min, max, scaleFactor) {
if (scaleFactor > 1) throw new Error('scaleFactor cannot be > 1');
if (scaleFactor < 0) throw new Error('scaleFactor cannot be < 0');
if (max < min) throw new Error('max cannot be < min');
var delta = max - min;
return (delta * scaleFactor) + min;
}<|fim▁end|> | } else if (data.type === 'line') { |
<|file_name|>while_loops.rs<|end_file_name|><|fim▁begin|>// This test case tests the incremental compilation hash (ICH) implementation
// for `while` loops.
// The general pattern followed here is: Change one thing between rev1 and rev2
// and make sure that the hash has changed, then change nothing between rev2 and
// rev3 and make sure that the hash has not changed.
// build-pass (FIXME(62277): could be check-pass?)
// revisions: cfail1 cfail2 cfail3 cfail4 cfail5 cfail6
// compile-flags: -Z query-dep-graph
// [cfail1]compile-flags: -Zincremental-ignore-spans
// [cfail2]compile-flags: -Zincremental-ignore-spans
// [cfail3]compile-flags: -Zincremental-ignore-spans
// [cfail4]compile-flags: -Zincremental-relative-spans
// [cfail5]compile-flags: -Zincremental-relative-spans
// [cfail6]compile-flags: -Zincremental-relative-spans
#![allow(warnings)]
#![feature(rustc_attrs)]
#![crate_type="rlib"]
// Change loop body
#[cfg(any(cfail1,cfail4))]
pub fn change_loop_body() {
let mut _x = 0;
while true {
_x = 1;
break;
}
}
#[cfg(not(any(cfail1,cfail4)))]
#[rustc_clean(cfg="cfail2", except="hir_owner_nodes, optimized_mir")]
#[rustc_clean(cfg="cfail3")]
#[rustc_clean(cfg="cfail5", except="hir_owner_nodes, optimized_mir")]
#[rustc_clean(cfg="cfail6")]
pub fn change_loop_body() {
let mut _x = 0;
while true {
_x = 2;
break;
}
}
// Change loop body
#[cfg(any(cfail1,cfail4))]
pub fn change_loop_condition() {
let mut _x = 0;
while true {
_x = 1;
break;
}
}
#[cfg(not(any(cfail1,cfail4)))]
#[rustc_clean(cfg="cfail2", except="hir_owner_nodes, optimized_mir")]
#[rustc_clean(cfg="cfail3")]
#[rustc_clean(cfg="cfail5", except="hir_owner_nodes, optimized_mir")]
#[rustc_clean(cfg="cfail6")]
pub fn change_loop_condition() {
let mut _x = 0;
while false {
_x = 1;
break;
}
}
// Add break
#[cfg(any(cfail1,cfail4))]
pub fn add_break() {
let mut _x = 0;
while true {
_x = 1;
// ---
}
}
#[cfg(not(any(cfail1,cfail4)))]
#[rustc_clean(cfg="cfail2", except="hir_owner_nodes, optimized_mir, typeck")]
#[rustc_clean(cfg="cfail3")]
#[rustc_clean(cfg="cfail5", except="hir_owner_nodes, optimized_mir, typeck")]
#[rustc_clean(cfg="cfail6")]
pub fn add_break() {
let mut _x = 0;
while true {
_x = 1;
break;
}
}
// Add loop label
#[cfg(any(cfail1,cfail4))]
pub fn add_loop_label() {
let mut _x = 0;
while true {
_x = 1;
break;
}
}
#[cfg(not(any(cfail1,cfail4)))]
#[rustc_clean(cfg="cfail2", except="hir_owner_nodes")]
#[rustc_clean(cfg="cfail3")]
#[rustc_clean(cfg="cfail5", except="hir_owner_nodes")]
#[rustc_clean(cfg="cfail6")]
pub fn add_loop_label() {
let mut _x = 0;
'label: while true {
_x = 1;
break;
}
}
// Add loop label to break
#[cfg(any(cfail1,cfail4))]
pub fn add_loop_label_to_break() {
let mut _x = 0;
'label: while true {
_x = 1;
break ;
}
}
#[cfg(not(any(cfail1,cfail4)))]
#[rustc_clean(cfg="cfail2", except="hir_owner_nodes")]
#[rustc_clean(cfg="cfail3")]
#[rustc_clean(cfg="cfail5", except="hir_owner_nodes")]
#[rustc_clean(cfg="cfail6")]
pub fn add_loop_label_to_break() {
let mut _x = 0;
'label: while true {
_x = 1;
break 'label;
}
}
// Change break label<|fim▁hole|>#[cfg(any(cfail1,cfail4))]
pub fn change_break_label() {
let mut _x = 0;
'outer: while true {
'inner: while true {
_x = 1;
break 'inner;
}
}
}
#[cfg(not(any(cfail1,cfail4)))]
#[rustc_clean(cfg="cfail2", except="hir_owner_nodes,optimized_mir,typeck")]
#[rustc_clean(cfg="cfail3")]
#[rustc_clean(cfg="cfail5", except="hir_owner_nodes,optimized_mir,typeck")]
#[rustc_clean(cfg="cfail6")]
pub fn change_break_label() {
let mut _x = 0;
'outer: while true {
'inner: while true {
_x = 1;
break 'outer;
}
}
}
// Add loop label to continue
#[cfg(any(cfail1,cfail4))]
pub fn add_loop_label_to_continue() {
let mut _x = 0;
'label: while true {
_x = 1;
continue ;
}
}
#[cfg(not(any(cfail1,cfail4)))]
#[rustc_clean(cfg="cfail2", except="hir_owner_nodes")]
#[rustc_clean(cfg="cfail3")]
#[rustc_clean(cfg="cfail5", except="hir_owner_nodes")]
#[rustc_clean(cfg="cfail6")]
pub fn add_loop_label_to_continue() {
let mut _x = 0;
'label: while true {
_x = 1;
continue 'label;
}
}
// Change continue label
#[cfg(any(cfail1,cfail4))]
pub fn change_continue_label() {
let mut _x = 0;
'outer: while true {
'inner: while true {
_x = 1;
continue 'inner;
}
}
}
#[cfg(not(any(cfail1,cfail4)))]
#[rustc_clean(cfg="cfail2", except="hir_owner_nodes,typeck")]
#[rustc_clean(cfg="cfail3")]
#[rustc_clean(cfg="cfail5", except="hir_owner_nodes,typeck,optimized_mir")]
#[rustc_clean(cfg="cfail6")]
pub fn change_continue_label() {
let mut _x = 0;
'outer: while true {
'inner: while true {
_x = 1;
continue 'outer;
}
}
}
// Change continue to break
#[cfg(any(cfail1,cfail4))]
pub fn change_continue_to_break() {
let mut _x = 0;
while true {
_x = 1;
continue;
}
}
#[cfg(not(any(cfail1,cfail4)))]
#[rustc_clean(cfg="cfail2", except="hir_owner_nodes, optimized_mir")]
#[rustc_clean(cfg="cfail3")]
#[rustc_clean(cfg="cfail5", except="hir_owner_nodes, optimized_mir")]
#[rustc_clean(cfg="cfail6")]
pub fn change_continue_to_break() {
let mut _x = 0;
while true {
_x = 1;
break ;
}
}<|fim▁end|> | |
<|file_name|>_logging.py<|end_file_name|><|fim▁begin|>"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
import logging
_logger = logging.getLogger('websocket')
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
_logger.addHandler(NullHandler())
_traceEnabled = False
__all__ = ["enableTrace", "dump", "error", "warning", "debug", "trace",
"isEnabledForError", "isEnabledForDebug", "isEnabledForTrace"]
def enableTrace(traceable, handler = logging.StreamHandler()):
"""
turn on/off the traceability.
traceable: boolean value. if set True, traceability is enabled.
"""
global _traceEnabled
_traceEnabled = traceable
if traceable:
_logger.addHandler(handler)
_logger.setLevel(logging.DEBUG)
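# Typical use (sketch): call enableTrace(True) before opening a connection so
# that sent/received frames are dumped to this logger's handler.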
def dump(title, message):
if _traceEnabled:
_logger.debug("--- " + title + " ---")
_logger.debug(message)
_logger.debug("-----------------------")
def error(msg):
_logger.error(msg)
def warning(msg):
_logger.warning(msg)
<|fim▁hole|>def debug(msg):
_logger.debug(msg)
def trace(msg):
if _traceEnabled:
_logger.debug(msg)
def isEnabledForError():
return _logger.isEnabledFor(logging.ERROR)
def isEnabledForDebug():
return _logger.isEnabledFor(logging.DEBUG)
def isEnabledForTrace():
return _traceEnabled<|fim▁end|> | |
<|file_name|>columngroup.d.ts<|end_file_name|><|fim▁begin|>import * as React from 'react';
// tslint:disable-next-line:no-empty-interface<|fim▁hole|><|fim▁end|> | export interface ColumnGroupProps { }
export declare class ColumnGroup extends React.Component<ColumnGroupProps, any> { } |
<|file_name|>PackingRecipe.java<|end_file_name|><|fim▁begin|>package betterwithaddons.crafting.recipes;
import betterwithaddons.crafting.ICraftingResult;
import betterwithaddons.util.ItemUtil;<|fim▁hole|>import net.minecraft.item.ItemStack;
import net.minecraft.item.crafting.Ingredient;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
public class PackingRecipe
{
public ICraftingResult output;
public List<Ingredient> inputs;
public PackingRecipe(List<Ingredient> inputs, ICraftingResult output) {
this.output = output;
this.inputs = inputs;
}
public ICraftingResult getOutput(List<ItemStack> inputs, IBlockState compressState) {
return this.output.copy();
}
public boolean consume(List<ItemStack> inputs, IBlockState compressState, boolean simulate)
{
inputs = new ArrayList<>(inputs);
for (Ingredient ingredient : this.inputs) {
boolean matches = false;
Iterator<ItemStack> iterator = inputs.iterator();
while(iterator.hasNext()) {
ItemStack checkStack = iterator.next();
if(ingredient.apply(checkStack)) {
if(!simulate)
checkStack.shrink(ItemUtil.getSize(ingredient));
iterator.remove();
matches = true;
}
}
if(!matches)
return false;
}
return true;
}
}<|fim▁end|> | import net.minecraft.block.state.IBlockState; |
<|file_name|>BaseNCodec.java<|end_file_name|><|fim▁begin|>/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.arquillian.warp.utils;
import java.io.UnsupportedEncodingException;
import org.apache.commons.codec.DecoderException;
/**
* Abstract superclass for Base-N encoders and decoders.
*
* <p>
* This class is not thread-safe. Each thread should use its own instance.
* </p>
*/
public abstract class BaseNCodec {
/**
* MIME chunk size per RFC 2045 section 6.8.
*
* <p>
* The {@value} character limit does not count the trailing CRLF, but counts all other characters, including any equal
* signs.
* </p>
*
* @see <a href="http://www.ietf.org/rfc/rfc2045.txt">RFC 2045 section 6.8</a>
*/
public static final int MIME_CHUNK_SIZE = 76;
/**
* PEM chunk size per RFC 1421 section 4.3.2.4.
*
* <p>
* The {@value} character limit does not count the trailing CRLF, but counts all other characters, including any equal
* signs.
* </p>
*
* @see <a href="http://tools.ietf.org/html/rfc1421">RFC 1421 section 4.3.2.4</a>
*/
public static final int PEM_CHUNK_SIZE = 64;
private static final int DEFAULT_BUFFER_RESIZE_FACTOR = 2;
/**
* Defines the default buffer size - currently {@value} - must be large enough for at least one encoded block+separator
*/
private static final int DEFAULT_BUFFER_SIZE = 8192;
/** Mask used to extract 8 bits, used in decoding bytes */
protected static final int MASK_8BITS = 0xff;
/**
* Byte used to pad output.
*/
protected static final byte PAD_DEFAULT = '='; // Allow static access to default
protected final byte PAD = PAD_DEFAULT; // instance variable just in case it needs to vary later
    /** Number of bytes in each full block of unencoded data, e.g. 3 for Base64 and 5 for Base32 */
private final int unencodedBlockSize;
    /** Number of bytes in each full block of encoded data, e.g. 4 for Base64 and 8 for Base32 */
private final int encodedBlockSize;
/**
* Chunksize for encoding. Not used when decoding. A value of zero or less implies no chunking of the encoded data. Rounded
* down to nearest multiple of encodedBlockSize.
*/
protected final int lineLength;
/**
* Size of chunk separator. Not used unless {@link #lineLength} > 0.
*/
private final int chunkSeparatorLength;
/**
* Buffer for streaming.
*/
protected byte[] buffer;
/**
* Position where next character should be written in the buffer.
*/
protected int pos;
/**
* Position where next character should be read from the buffer.
*/
private int readPos;
/**
* Boolean flag to indicate the EOF has been reached. Once EOF has been reached, this object becomes useless, and must be
* thrown away.
*/
protected boolean eof;
/**
* Variable tracks how many characters have been written to the current line. Only used when encoding. We use it to make
* sure each encoded line never goes beyond lineLength (if lineLength > 0).
*/
protected int currentLinePos;
/**
* Writes to the buffer only occur after every 3/5 reads when encoding, and every 4/8 reads when decoding. This variable
* helps track that.
*/
protected int modulus;
/**
* Note <code>lineLength</code> is rounded down to the nearest multiple of {@link #encodedBlockSize} If
* <code>chunkSeparatorLength</code> is zero, then chunking is disabled.
*
* @param unencodedBlockSize the size of an unencoded block (e.g. Base64 = 3)
* @param encodedBlockSize the size of an encoded block (e.g. Base64 = 4)
* @param lineLength if > 0, use chunking with a length <code>lineLength</code>
* @param chunkSeparatorLength the chunk separator length, if relevant
*/
protected BaseNCodec(int unencodedBlockSize, int encodedBlockSize, int lineLength, int chunkSeparatorLength) {
this.unencodedBlockSize = unencodedBlockSize;
this.encodedBlockSize = encodedBlockSize;
this.lineLength = (lineLength > 0 && chunkSeparatorLength > 0) ? (lineLength / encodedBlockSize) * encodedBlockSize : 0;
this.chunkSeparatorLength = chunkSeparatorLength;
}
/**
* Returns true if this object has buffered data for reading.
*
* @return true if there is data still available for reading.
*/
boolean hasData() { // package protected for access from I/O streams
return this.buffer != null;
}
/**
* Returns the amount of buffered data available for reading.
*
* @return The amount of buffered data available for reading.
*/
int available() { // package protected for access from I/O streams
return buffer != null ? pos - readPos : 0;
}
/**
* Get the default buffer size. Can be overridden.
*
* @return {@link #DEFAULT_BUFFER_SIZE}
*/
protected int getDefaultBufferSize() {
return DEFAULT_BUFFER_SIZE;
}
/** Increases our buffer by the {@link #DEFAULT_BUFFER_RESIZE_FACTOR}. */
private void resizeBuffer() {
if (buffer == null) {
buffer = new byte[getDefaultBufferSize()];
pos = 0;
readPos = 0;
<|fim▁hole|> byte[] b = new byte[buffer.length * DEFAULT_BUFFER_RESIZE_FACTOR];
System.arraycopy(buffer, 0, b, 0, buffer.length);
buffer = b;
}
}
/**
* Ensure that the buffer has room for <code>size</code> bytes
*
* @param size minimum spare space required
*/
protected void ensureBufferSize(int size) {
if ((buffer == null) || (buffer.length < pos + size)) {
resizeBuffer();
}
}
/**
* Extracts buffered data into the provided byte[] array, starting at position bPos, up to a maximum of bAvail bytes.
* Returns how many bytes were actually extracted.
*
* @param b byte[] array to extract the buffered data into.
* @param bPos position in byte[] array to start extraction at.
* @param bAvail amount of bytes we're allowed to extract. We may extract fewer (if fewer are available).
* @return The number of bytes successfully extracted into the provided byte[] array.
*/
int readResults(byte[] b, int bPos, int bAvail) { // package protected for access from I/O streams
if (buffer != null) {
int len = Math.min(available(), bAvail);
System.arraycopy(buffer, readPos, b, bPos, len);
readPos += len;
if (readPos >= pos) {
buffer = null; // so hasData() will return false, and this method can return -1
}
return len;
}
return eof ? -1 : 0;
}
/**
* Checks if a byte value is whitespace or not. Whitespace is taken to mean: space, tab, CR, LF
*
* @param byteToCheck the byte to check
* @return true if byte is whitespace, false otherwise
*/
protected static boolean isWhiteSpace(byte byteToCheck) {
switch (byteToCheck) {
case ' ':
case '\n':
case '\r':
case '\t':
return true;
default:
return false;
}
}
/**
* Resets this object to its initial newly constructed state.
*/
private void reset() {
buffer = null;
pos = 0;
readPos = 0;
currentLinePos = 0;
modulus = 0;
eof = false;
}
/**
* Encodes an Object using the Base-N algorithm. This method is provided in order to satisfy the requirements of the Encoder
* interface, and will throw an IllegalStateException if the supplied object is not of type byte[].
*
* @param pObject Object to encode
* @return An object (of type byte[]) containing the Base-N encoded data which corresponds to the byte[] supplied.
* @throws IllegalStateException if the parameter supplied is not of type byte[]
*/
public Object encode(Object pObject) {
if (!(pObject instanceof byte[])) {
throw new IllegalStateException("Parameter supplied to Base-N encode is not a byte[]");
}
return encode((byte[]) pObject);
}
/**
* Encodes a byte[] containing binary data, into a String containing characters in the Base-N alphabet.
*
* @param pArray a byte array containing binary data
* @return A String containing only Base-N character data
*/
public String encodeToString(byte[] pArray) {
return newStringUtf8(encode(pArray));
}
/**
* Decodes an Object using the Base-N algorithm. This method is provided in order to satisfy the requirements of the Decoder
     * interface, and will throw an IllegalStateException if the supplied object is not of type byte[] or String.
*
* @param pObject Object to decode
* @return An object (of type byte[]) containing the binary data which corresponds to the byte[] or String supplied.
     * @throws IllegalStateException if the parameter supplied is not of type byte[] or String
*/
public Object decode(Object pObject) throws IllegalStateException {
if (pObject instanceof byte[]) {
return decode((byte[]) pObject);
} else if (pObject instanceof String) {
return decode((String) pObject);
} else {
throw new IllegalStateException("Parameter supplied to Base-N decode is not a byte[] or a String");
}
}
/**
* Decodes a String containing characters in the Base-N alphabet.
*
* @param pArray A String containing Base-N character data
* @return a byte array containing binary data
*/
public byte[] decode(String pArray) {
return decode(getBytesUtf8(pArray));
}
/**
* Decodes a byte[] containing characters in the Base-N alphabet.
*
* @param pArray A byte array containing Base-N character data
* @return a byte array containing binary data
*/
public byte[] decode(byte[] pArray) {
reset();
if (pArray == null || pArray.length == 0) {
return pArray;
}
decode(pArray, 0, pArray.length);
decode(pArray, 0, -1); // Notify decoder of EOF.
byte[] result = new byte[pos];
readResults(result, 0, result.length);
return result;
}
/**
* Encodes a byte[] containing binary data, into a byte[] containing characters in the alphabet.
*
* @param pArray a byte array containing binary data
* @return A byte array containing only the basen alphabetic character data
*/
public byte[] encode(byte[] pArray) {
reset();
if (pArray == null || pArray.length == 0) {
return pArray;
}
encode(pArray, 0, pArray.length);
encode(pArray, 0, -1); // Notify encoder of EOF.
byte[] buf = new byte[pos - readPos];
readResults(buf, 0, buf.length);
return buf;
}
/**
* Encodes a byte[] containing binary data, into a String containing characters in the appropriate alphabet. Uses UTF8
* encoding.
*
* @param pArray a byte array containing binary data
* @return String containing only character data in the appropriate alphabet.
*/
public String encodeAsString(byte[] pArray) {
return newStringUtf8(encode(pArray));
}
abstract void encode(byte[] pArray, int i, int length); // package protected for access from I/O streams
abstract void decode(byte[] pArray, int i, int length); // package protected for access from I/O streams
/**
* Returns whether or not the <code>octet</code> is in the current alphabet. Does not allow whitespace or pad.
*
* @param value The value to test
*
* @return <code>true</code> if the value is defined in the current alphabet, <code>false</code> otherwise.
*/
protected abstract boolean isInAlphabet(byte value);
/**
* Tests a given byte array to see if it contains only valid characters within the alphabet. The method optionally treats
* whitespace and pad as valid.
*
* @param arrayOctet byte array to test
* @param allowWSPad if <code>true</code>, then whitespace and PAD are also allowed
*
* @return <code>true</code> if all bytes are valid characters in the alphabet or if the byte array is empty;
* <code>false</code>, otherwise
*/
public boolean isInAlphabet(byte[] arrayOctet, boolean allowWSPad) {
for (int i = 0; i < arrayOctet.length; i++) {
if (!isInAlphabet(arrayOctet[i]) && (!allowWSPad || (arrayOctet[i] != PAD) && !isWhiteSpace(arrayOctet[i]))) {
return false;
}
}
return true;
}
/**
* Tests a given String to see if it contains only valid characters within the alphabet. The method treats whitespace and
* PAD as valid.
*
* @param basen String to test
* @return <code>true</code> if all characters in the String are valid characters in the alphabet or if the String is empty;
* <code>false</code>, otherwise
* @see #isInAlphabet(byte[], boolean)
*/
public boolean isInAlphabet(String basen) {
return isInAlphabet(getBytesUtf8(basen), true);
}
/**
* Tests a given byte array to see if it contains any characters within the alphabet or PAD.
*
* Intended for use in checking line-ending arrays
*
* @param arrayOctet byte array to test
* @return <code>true</code> if any byte is a valid character in the alphabet or PAD; <code>false</code> otherwise
*/
protected boolean containsAlphabetOrPad(byte[] arrayOctet) {
if (arrayOctet == null) {
return false;
}
for (byte element : arrayOctet) {
if (PAD == element || isInAlphabet(element)) {
return true;
}
}
return false;
}
/**
* Calculates the amount of space needed to encode the supplied array.
*
* @param pArray byte[] array which will later be encoded
*
* @return amount of space needed to encoded the supplied array. Returns a long since a max-len array will require >
* Integer.MAX_VALUE
*/
public long getEncodedLength(byte[] pArray) {
// Calculate non-chunked size - rounded up to allow for padding
// cast to long is needed to avoid possibility of overflow
long len = ((pArray.length + unencodedBlockSize - 1) / unencodedBlockSize) * (long) encodedBlockSize;
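        // e.g. for Base64 (3 unencoded -> 4 encoded bytes): 10 input bytes round
        // up to 4 blocks, giving len = 16 before any chunk separators are added.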
if (lineLength > 0) { // We're using chunking
// Round up to nearest multiple
len += ((len + lineLength - 1) / lineLength) * chunkSeparatorLength;
}
return len;
}
/**
* Constructs a new <code>String</code> by decoding the specified array of bytes using the UTF-8 charset.
*
* @param bytes The bytes to be decoded into characters
* @return A new <code>String</code> decoded from the specified array of bytes using the UTF-8 charset, or <code>null</code>
* if the input byte array was <code>null</code>.
* @throws IllegalStateException Thrown when a {@link UnsupportedEncodingException} is caught, which should never happen
* since the charset is required.
*/
public static String newStringUtf8(byte[] bytes) {
if (bytes == null) {
return null;
}
try {
return new String(bytes, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("UTF-8", e);
}
}
/**
* Encodes the given string into a sequence of bytes using the UTF-8 charset, storing the result into a new byte array.
*
* @param string the String to encode, may be <code>null</code>
* @return encoded bytes, or <code>null</code> if the input string was <code>null</code>
* @throws IllegalStateException Thrown when the charset is missing, which should be never according the the Java
* specification.
* @see <a href="http://download.oracle.com/javase/1.5.0/docs/api/java/nio/charset/Charset.html">Standard charsets</a>
* @see #getBytesUnchecked(String, String)
*/
public static byte[] getBytesUtf8(String string) {
if (string == null) {
return null;
}
try {
return string.getBytes("UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("UTF-8", e);
}
}
}<|fim▁end|> | } else {
|
<|file_name|>JavascriptPropertyWrapper.cpp<|end_file_name|><|fim▁begin|>// Copyright © 2010-2014 The CefSharp Project. All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.<|fim▁hole|>#pragma once
#include "Stdafx.h"
#include "JavascriptPropertyWrapper.h"
#include "JavascriptObjectWrapper.h"
using namespace System;
namespace CefSharp
{
void JavascriptPropertyWrapper::Bind()
{
auto propertyName = StringUtils::ToNative(_javascriptProperty->JavascriptName);
auto clrPropertyName = _javascriptProperty->JavascriptName;
if (_javascriptProperty->IsComplexType)
{
auto javascriptObjectWrapper = gcnew JavascriptObjectWrapper(_javascriptProperty->JsObject, _browserProcess);
javascriptObjectWrapper->V8Value = V8Value.get();
javascriptObjectWrapper->Bind();
_javascriptObjectWrapper = javascriptObjectWrapper;
}
else
{
auto propertyAttribute = _javascriptProperty->IsReadOnly ? V8_PROPERTY_ATTRIBUTE_READONLY : V8_PROPERTY_ATTRIBUTE_NONE;
V8Value->SetValue(propertyName, V8_ACCESS_CONTROL_DEFAULT, propertyAttribute);
}
};
}<|fim▁end|> | |
<|file_name|>builder.rs<|end_file_name|><|fim▁begin|>use crate::enums::{CapStyle, DashStyle, LineJoin};
use crate::factory::IFactory;
use crate::stroke_style::StrokeStyle;
use com_wrapper::ComWrapper;
use dcommon::Error;
use winapi::shared::winerror::SUCCEEDED;
use winapi::um::d2d1::D2D1_STROKE_STYLE_PROPERTIES;
pub struct StrokeStyleBuilder<'a> {
factory: &'a dyn IFactory,
start_cap: CapStyle,
end_cap: CapStyle,
dash_cap: CapStyle,
line_join: LineJoin,
miter_limit: f32,
dash_style: DashStyle,
dash_offset: f32,
dashes: Option<&'a [f32]>,
}
impl<'a> StrokeStyleBuilder<'a> {
pub fn new(factory: &'a dyn IFactory) -> Self {
// default values taken from D2D1::StrokeStyleProperties in d2d1helper.h<|fim▁hole|> dash_cap: CapStyle::Flat,
line_join: LineJoin::Miter,
miter_limit: 10.0,
dash_style: DashStyle::Solid,
dash_offset: 0.0,
dashes: None,
}
}
pub fn build(self) -> Result<StrokeStyle, Error> {
unsafe {
let properties = self.to_d2d1();
let (dashes, dash_count) = self
.dashes
.map(|d| (d.as_ptr(), d.len() as u32))
.unwrap_or((std::ptr::null(), 0));
let mut ptr = std::ptr::null_mut();
let hr =
self.factory
.raw_f()
.CreateStrokeStyle(&properties, dashes, dash_count, &mut ptr);
if SUCCEEDED(hr) {
Ok(StrokeStyle::from_raw(ptr))
} else {
Err(hr.into())
}
}
}
pub fn with_start_cap(mut self, start_cap: CapStyle) -> Self {
self.start_cap = start_cap;
self
}
pub fn with_end_cap(mut self, end_cap: CapStyle) -> Self {
self.end_cap = end_cap;
self
}
pub fn with_dash_cap(mut self, dash_cap: CapStyle) -> Self {
self.dash_cap = dash_cap;
self
}
pub fn with_line_join(mut self, line_join: LineJoin) -> Self {
self.line_join = line_join;
self
}
pub fn with_miter_limit(mut self, miter_limit: f32) -> Self {
self.miter_limit = miter_limit;
self
}
pub fn with_dash_style(mut self, dash_style: DashStyle) -> Self {
self.dash_style = dash_style;
self
}
pub fn with_dash_offset(mut self, dash_offset: f32) -> Self {
self.dash_offset = dash_offset;
self
}
pub fn with_dashes(mut self, dashes: &'a [f32]) -> Self {
self.dash_style = DashStyle::Custom;
self.dashes = Some(dashes);
self
}
fn to_d2d1(&self) -> D2D1_STROKE_STYLE_PROPERTIES {
D2D1_STROKE_STYLE_PROPERTIES {
startCap: self.start_cap as u32,
endCap: self.end_cap as u32,
dashCap: self.dash_cap as u32,
lineJoin: self.line_join as u32,
miterLimit: self.miter_limit,
dashStyle: self.dash_style as u32,
dashOffset: self.dash_offset,
}
}
}<|fim▁end|> | StrokeStyleBuilder {
factory,
start_cap: CapStyle::Flat,
end_cap: CapStyle::Flat, |
<|file_name|>firebase-test.py<|end_file_name|><|fim▁begin|>from firebase import firebase
firebase = firebase.FirebaseApplication('https://wapi.firebaseio.com', None)
new_user = 'Ozgur Vatansever'
<|fim▁hole|><|fim▁end|> | result = firebase.post('/users', new_user, name=None, connection=None, params={'print': 'pretty'}, headers={'X_FANCY_HEADER': 'VERY FANCY'})
print result |
<|file_name|>cornerstone.js<|end_file_name|><|fim▁begin|>// This is a manifest file that'll be compiled into including all the files listed below.
// Add new JavaScript/Coffee code in separate files in this directory and they'll automatically
// be included in the compiled file accessible from http://example.com/assets/application.js
// It's not advisable to add code directly here, but if you do, it'll appear at the bottom of
// the compiled file.
//
//= require jquery
//= require jquery_ujs<|fim▁hole|><|fim▁end|> | //= require_tree . |
<|file_name|>makeBalise.py<|end_file_name|><|fim▁begin|>#! usr/bin/env python
# coding: utf8
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
"""metalex is general tool for lexicographic and metalexicographic activities
Copyright (C) 2017 by Elvis MBONING
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Contact: [email protected]
---------------------------------------------------------------------------
makeBalise transform extracted articles into well formed xml file.
It can also generate HTML file for article edition
Packages:
>>> sudo apt-get install python-html5lib
>>> sudo apt-get install python-lxml
>>> sudo apt-get install python-bs4
Usage:
>>> from metalex.dicXmilised import *
>>> dicoHtml(save=True)
"""
# ----Internal Modules------------------------------------------------------
import metalex
from .composeArticle import *
from .dicXmlTool import *
# ----External Modules------------------------------------------------------
import re
import sys
import codecs
import os
from bs4 import BeautifulSoup
from random import sample
from shutil import copyfile
from lxml import etree
from termcolor import colored
# -----Exported Functions-----------------------------------------------------
__all__ = ['BaliseXML', 'dico_html']
# -----Global Variables-----------------------------------------------------
components = {
'xml' : {
'metalexMetadata' : ['metalexMetadata', 'projectName', 'author',
'dateCreation', 'comment', 'contributors', 'candidate'],
'metalexContent' : ['article', 'definition', 'example', 'figured', 'contrary',
'entry', 'flexion', 'category', 'gender', 'rection', 'phonetic',
'identificationComponent', 'treatmentComponent', 'cte_cat',
'processingUnit', 'cgr_pt', 'cgr_vrg', 'cgr_fpar', 'cgr_opar',
'cgr_ocrch', 'cgr_fcrch', 'metalexContent', 'cte_gender',
'metalexResultDictionary']
},
'tei' : {
'teiHeader' : ['teiHeader', 'text', 'TEI', 'fileDesc', 'titleStmt',
'title', 'publicationStmt', 'p', 'sourceDesc', 'author'],
'text' : ['body', 'head', 'entry', 'form', 'orth', 'gramGrp',
'sense', 'def', 'cite', 'quote', 'span', 'usg', 'bibl',
'pos', 'genre', 'number', 'pron', 'etym']
},
'lmf' : {
'GlobalInformation' : ['LexicalResource', 'feat', 'p', 'GlobalInformation'],
'Lexicon' : ['Lexicon', 'feat', 'LexicalEntry', 'WordForm',
'Definition', 'Sense', 'Lexicon']
},
'dtd' : ['ELEMENT', 'ATTRIBUTE', 'PCDATA', 'CDATA', 'REQUIRED', 'IMPLIED'],
'xsd' : []
}
codifArticles = []
# ----------------------------------------------------------
def dico_html(save=False):
"""Build HTML editor file of the all articles
:return file: metalexViewerEditor.html
"""
print('\n --- %s %s \n\n' %(colored('Part 4: Generate Output formats', attrs=['bold']), '--'*25))
metalex.plugins
instanceHtml = BaliseHTML()
filepath = metalex.html_template
metalex.utils.create_temp()
if metalex.utils.in_dir('CopymetalexTemplate.html'):
copyfile(filepath, 'CopymetalexTemplate.html')
souphtl = instanceHtml.html_inject('CopymetalexTemplate.html')
if save:
metalex.utils.go_to_dicresult()
name = metalex.currentOcr+'_metalexViewerEditor.html'
with codecs.open(name, 'w') as htmlresult:
htmlresult.write(souphtl)
metalex.utils.create_temp()
os.remove('CopymetalexTemplate.html')
message = "*"+name+"* has correctly been generated > Saved in dicTemp folder"
metalex.logs.manageLog.write_log(message)
else:
souphtl = instanceHtml.html_inject('CopymetalexTemplate.html')
if save:
metalex.utils.go_to_dicresult()
with codecs.open(name, 'w') as htmlresult:
htmlresult.write(souphtl)
metalex.utils.create_temp()
os.remove('CopymetalexTemplate.html')
message = "*"+name+"* has correctly been generated > Saved in dicTemp folder"
metalex.logs.manageLog.write_log(message)
print('\n\n --- %s --------------- \n\n' %colored('MetaLex Processes was ended: consult results data in "dicTemp" folder',
'green', attrs=['bold']))
class BaliseHTML():
def __init__(self):
self.resultHtml = ''
def html_inject(self, template):
"""Create prettify HTML file all previous data generated
:return str: html (prettify by BeautifulSoup)
"""
instanceXml = BaliseXML()
contentxml = instanceXml.put_xml(typ='xml', save=True)
metalex.utils.create_temp()
soupXml = BeautifulSoup(contentxml, "html.parser")
projectconf = metalex.utils.read_conf()
Hauthor, Hname = projectconf['Author'], projectconf['Projectname'],
Hdate,Hcomment = projectconf['Creationdate'], projectconf['Comment']
Hcontrib = projectconf['Contributors']
filetemplate = codecs.open(template, 'r', 'utf-8')
souphtml = BeautifulSoup(filetemplate, "html5lib")
content = souphtml.find('div', attrs={'id': 'all-articles'})
author = content.find('h3', attrs={'id': 'author'})
author.string = 'main: '+Hauthor
date = content.find('h5', attrs={'id': 'date'})
date.string = Hdate
descipt = content.find('p', attrs={'id': 'description'})
descipt.string = Hcomment
contrib = content.find('h4', attrs={'id': 'contributors'})
contrib.string = 'contributors: '+Hcontrib
project = content.find('h4', attrs={'id': 'projetname'})
project.string = Hname
articlesxml = soupXml.findAll('article')
articleshtml = souphtml.find('div', attrs={'id': 'mtl:articles'})
for x in articlesxml:
elementart = BeautifulSoup('<article id=""></article>', 'html5lib')
idart = x.get('id')
artlem = x.get_text()
elementart.article.append(artlem)
elementart.article['id'] = idart
articleshtml.append(elementart.find('article'))
listlemme = souphtml.find('ul', attrs={'id': 'list-articles'})
for x in articlesxml:
art = x.get_text()
idart = x.get('id')
lem = x.find('entry').get_text()
lemme = BeautifulSoup('<li class="w3-hover-light-grey"><span class="lemme" onclick="changeImage('+
"'"+idart+"'"+')">'+lem+'</span><span class="fa fa-plus w3-closebtn" onclick="add('+
"'"+idart+"'"+')"/></li>', 'html5lib')
listlemme.append(lemme.find('li'))
filetemplate.close()
self.resultHtml = souphtml.prettify('utf-8')
return self.resultHtml
class BaliseXML ():
"""Build XML file type (xml|tei|lmf) with global metadata of the project
:param typ: str
:return obj: instance of BaliseXML
"""
def __init__(self, typ="xml"):
self.typ = typ
def build_structure(self, data, Sfile=None, typ='dtd'):
return False
def message(self, name):<|fim▁hole|> " created > Saved in dicTemp folder"
def put_xml(self, typ='xml', save=False):
"""Create well formed (xml|tei|lmf) file with metadata and content xml
:return metalexXml
"""
metadata = self.xml_metadata(typ)
content = self.xml_content(typ)
metalex.utils.go_to_dicresult()
if typ == 'xml':
if save:
name = 'metalex-'+metalex.projectName+'_'+metalex.currentOcr+'.xml'
metalexXml = self.balise(metadata+content, 'metalexResultDictionary',
attr={'xmlns':'https://www.w3schools.com',
'xmlns:xsi':'http://www.w3.org/2001/XMLSchema-in',
'xsi:schemaLocation':'metalexSchemaXML.xsd'})
metalexXml = '<?xml version="1.0" encoding="UTF-8" ?>'+metalexXml
metalexXmlTree = BeautifulSoup(metalexXml, 'xml')
if metalex.utils.in_dir(name):
with codecs.open(name, 'w', 'utf-8') as fle:
fle.write(metalexXmlTree.prettify(formatter=None))
mge = self.message(name)
metalex.logs.manageLog.write_log(mge)
else:
mge = self.message(name)
metalex.logs.manageLog.write_log(mge)
return metalexXml
else:
metalexXml = self.balise(metadata+content, 'metalexResultDictionary', attr={})
metalexXml = '<?xml version="1.0" encoding="UTF-8" ?>'+metalexXml
metalexXmlTree = BeautifulSoup(metalexXml, 'xml')
print(metalexXmlTree.prettify(formatter=None))
if typ == 'tei':
if save:
name = 'metalex-'+metalex.projectName+'_'+metalex.currentOcr+'-TEI.xml'
metalexXml = self.balise(metadata+content, 'TEI', typ= 'tei')
metalexXml = '<?xml version="1.0" encoding="UTF-8" ?>'+metalexXml
metalexXmlTree = BeautifulSoup(metalexXml, 'xml')
if metalex.utils.in_dir(name):
with codecs.open(name, 'w', 'utf-8') as fle:
fle.write(metalexXmlTree.prettify(formatter=None))
mge = self.message(name)
metalex.logs.manageLog.write_log(mge)
else:
mge = self.message(name)
metalex.logs.manageLog.write_log(mge)
return metalexXml
else:
metalexXml = self.balise(metadata+content, 'TEI', typ= 'tei')
metalexXml = '<?xml version="1.0" encoding="UTF-8" ?>'+metalexXml
metalexXmlTree = BeautifulSoup(metalexXml, 'xml')
print(metalexXmlTree.prettify(formatter=None))
if typ == 'lmf':
os.listdir('.')
if save:
name = 'metalex-'+metalex.projectName+'_'+metalex.currentOcr+'-LMF.xml'
metalexXml = self.balise(metadata+content, 'LexicalResource', attr={'dtdVersion':'15'}, typ= 'lmf')
metalexXml = '<?xml version="1.0" encoding="UTF-8" ?>'+metalexXml
metalexXmlTree = BeautifulSoup(metalexXml, 'xml')
if metalex.utils.in_dir(name):
with codecs.open(name, 'w', 'utf-8') as fle:
fle.write(metalexXmlTree.prettify(formatter=None))
mge = self.message(name)
metalex.logs.manageLog.write_log(mge)
else:
mge = self.message(name)
metalex.logs.manageLog.write_log(mge)
return metalexXml
else:
metalexXml = self.balise(metadata+content, 'LexicalResource', attr={'dtdVersion':'15'}, typ= 'lmf')
metalexXml = '<?xml version="1.0" encoding="UTF-8" ?>'+metalexXml
metalexXmlTree = BeautifulSoup(metalexXml, 'xml')
print(metalexXmlTree.prettify(formatter=None))
def xml_metadata(self, typ='xml'):
"""Create xml metadata file with configuration of the project
:return str: metadata
"""
metalex.utils.create_temp()
projectconf = metalex.utils.read_conf()
contribtab = projectconf['Contributors'].split(',') \
if projectconf['Contributors'].find(',') else projectconf['Contributors']
contrib = ''
if typ == 'xml':
author = self.balise(projectconf['Author'], 'author', typ)
name = self.balise(projectconf['Projectname'].strip(), 'projectName', typ)
date = self.balise(projectconf['Creationdate'].strip(), 'dateCreation', typ)
comment = self.balise(projectconf['Comment'], 'comment', typ)
if len(contribtab) > 1:
for data in contribtab: contrib += self.balise(data.strip(), 'candidate', typ)
else: contrib = self.balise(''.join(contribtab), 'candidate', typ)
contrib = self.balise(contrib, 'contributors', typ)
cont = name+author+date+comment+contrib
metadataxml = self.balise(cont, 'metalexMetadata', typ)
return metadataxml
if typ == 'tei':
if len(contribtab) > 1:
for data in contribtab:
if len(data) > 2: contrib += self.balise(data.strip(), 'span',
attr={'content':'contributor'}, typ='tei')
else: contrib = self.balise(''.join(contribtab), 'span', typ='tei')
author = self.balise(projectconf['Author'], 'author', typ='tei')
title = self.balise(projectconf['Projectname'], 'title', typ='tei')
RtitleStmt = self.balise(title, 'titleStmt', typ='tei')
pdate = self.balise(projectconf['Creationdate'], 'p', typ='tei')
pcomment = self.balise(projectconf['Comment'], 'p', typ='tei')
pcontrib = self.balise(contrib, 'p', attr={'content':'contributors'}, typ='tei')
Rpubli = self.balise(author+pdate+pcomment+pcontrib, 'publicationStmt', typ='tei')
sourc = self.balise('TEI metadata for metalex project output', 'p', typ='tei')
Rsourc = self.balise(sourc, 'sourceDesc', typ='tei')
RfilD = self.balise(RtitleStmt+Rpubli+Rsourc, 'fileDesc', typ='tei')
metadatatei = self.balise(RfilD, 'teiHeader', typ='tei')
return metadatatei
if typ == 'lmf':
if len(contribtab) > 1:
for data in contribtab:
if len(data) > 2: contrib += data.strip()+', '
else: contrib = ', '.join(contribtab)
enc = self.balise('', 'feat', attr={'att':'languageCoding', 'val':'utf-8'},
typ='lmf', sclose=True)
pauthor = self.balise('', 'feat', attr={'att':'author', 'val':projectconf['Author'].strip()},
typ='lmf', sclose=True)
pdate = self.balise('', 'feat', attr={'att':'dateCreation', 'val':projectconf['Creationdate'].strip()},
typ='lmf', sclose=True)
pname = self.balise('', 'feat', attr={'att':'projectName', 'val':projectconf['Projectname'].strip()},
typ='lmf', sclose=True)
pcomment = self.balise('', 'feat', attr={'att':'comment', 'val':projectconf['Comment'].strip()},
typ='lmf', sclose=True)
pcontrib = self.balise('', 'feat', attr={'att':'contributors', 'val':contrib.strip(', ')},
typ='lmf', sclose=True)
meta = self.balise('', 'p', attr={'att':'meta', 'val':'TEI metadata for metalex project output'},
typ='lmf', sclose=True)
metadatalmf = self.balise(enc+pauthor+pname+meta+pdate+pcomment+pcontrib, 'GlobalInformation', typ='lmf')
return metadatalmf
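    # Illustrative sketch of the 'xml' metadata assembled above (placeholder values,
    # not taken from a real project configuration):
    #   <metalexMetadata><projectName>demo</projectName><author>A. Author</author>
    #   <dateCreation>2017-01-01</dateCreation><comment>...</comment>
    #   <contributors><candidate>B. Contributor</candidate></contributors></metalexMetadata>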
def balise_content_article (self):
data = get_data_articles('text')
cod = StructuredWithCodif(data, 'xml')
resultArticles = []
for art in cod.format_articles():
article_type_form(art)
if article_type_form(art) == '1':
partArt = re.search(r'(([a-zéèàûô]+)\s(<cte_cat>.+</cte_cat>)\s(.+)<cgr_pt>\.</cgr_pt>)', art, re.I)
if partArt != None:
ident, entry, cat, treat = partArt.group(1), partArt.group(2), partArt.group(3), partArt.group(4)
id = generate_id()
entry = self.balise(entry, 'entry')
ident = self.balise(entry+cat, 'identificationComponent')
treat = self.balise(self.balise(treat, 'definition'), 'processingUnit')
article = self.balise(ident+self.balise(treat, 'treatmentComponent'), 'article', attr={'id':id})
resultArticles.append(article)
if article_type_form(art) == '2':
research = r'(([a-zéèàûô]+)\s(<cte_cat>.+</cte_cat>\s<cte_gender>..</cte_gender>)\s(.+)<cgr_pt>\.</cgr_pt>)'
partArt = re.search(research, art, re.I)
if partArt != None:
ident, entry, cat, treat = partArt.group(1), partArt.group(2), partArt.group(3), partArt.group(4)
id = generate_id()
entry = self.balise(entry, 'entry')
ident = self.balise(entry+cat, 'identificationComponent')
if not re.search(r'(<cgr_pt>\.</cgr_pt>|<cte_cat>.+</cte_cat>|<cgr_vrg>,</cgr_vrg>)', partArt.group(4), re.I):
treat = self.balise(self.balise(treat+'.', 'definition'), 'processingUnit')
article = self.balise(ident+self.balise(treat, 'treatmentComponent'), 'article', attr={'id':id})
resultArticles.append(article)
elif partArt.group(4).find(' et ') != -1:
suite = 'hahaha'
return resultArticles
def xml_content(self, typ='xml', forme='text'):
"""Create xml content file (representing articles) with data articles extracting
:return str: contentXml
"""
content = ''
contentXml = ''
data = self.balise_content_article()
if typ == 'xml':
if forme == 'pickle':
data = get_data_articles('pickle')
for dicart in data:
for art in dicart.keys():
art = self.balise(dicart[art], 'article', art=True)
content += art
contentXml = self.balise(content, 'metalexContent')
return contentXml
else:
for art in data: content += art
contentXml = self.balise(content, 'metalexContent', attr={'totalArticle': str(len(data))})
return contentXml
if typ == 'tei':
for art in data:
soupart = BeautifulSoup(art, 'html.parser')
orth = soupart.find('entry').getText()
atOrth = soupart.find('article').get('id')
orth = self.balise(orth, 'orth', {'id': atOrth}, typ='tei')
formB = self.balise(orth, 'form', attr={'xml:lang':'fr', 'type':'lemma'}, typ='tei')
pos = soupart.find('cte_cat').getText()
posB = self.balise(pos, 'pos', typ='tei')
genB = ''
if soupart.find('cte_gender'): genB = soupart.find('cte_gender').getText().strip()
if genB == 'f.' or genB == 'm.': genB = self.balise(genB, 'genre', typ='tei')
gramgrp = self.balise(posB+genB, 'gramGrp', typ='tei')
sens = soupart.find('processingunit').getText().replace(' .', '.')
defi = self.balise(sens, 'def', typ='tei')
if sens != None: sens = self.balise(defi, 'sense', typ='tei')
entry = self.balise(formB+gramgrp+sens, 'entry', typ='tei')
content += entry
body = self.balise(content, 'body', typ='tei')
contentXml = self.balise(body, 'text', attr={'totalArticle': str(len(data))}, typ='tei')
return contentXml
if typ == 'lmf':
for art in data:
soupart = BeautifulSoup(art, 'html.parser')
orth = soupart.find('entry').getText()
atOrth = soupart.find('article').get('id')
orth = self.balise('', 'feat', attr={'att':'writtenForm','val':orth},
typ='lmf', sclose=True)
wordF = self.balise(orth, 'WordForm', attr={'id': atOrth}, typ='lmf')
pos = soupart.find('cte_cat').getText()
posB = self.balise('', 'feat', attr={'att':'partOfSpeech','val':pos},
typ='lmf', sclose=True)
genB = ''
if soupart.find('cte_gender'): genB = soupart.find('cte_gender').getText().strip()
if genB == 'f.' or genB == 'm.':
genB = self.balise('', 'feat', attr={'att':'grammaticalNumber','val': genB},
typ='lmf', sclose=True)
sens = soupart.find('processingunit').getText().replace(' .', '.')
sensnb = self.balise('', 'feat', attr={'att':'sensNumber','val':'1'},
typ='lmf', sclose=True)
definb = self.balise('', 'feat', attr={'att':'text','val':sens.strip()},
typ='lmf', sclose=True)
defi = self.balise(definb, 'Definition', typ='lmf')
if sens != None: sens = self.balise(sensnb+defi, 'Sense', typ='lmf')
entry = self.balise(wordF+posB+genB+sens, 'LexicalEntry', typ='lmf')
content += entry
body = self.balise('', 'feat', attr={'att':'language','val':'fra'},
typ='lmf', sclose=True)+content
contentXml = self.balise(body, 'Lexicon', attr={'totalArticle': str(len(data))}, typ='lmf')
return contentXml
def balise(self, element, markup, sclose=False, attr=None, typ='xml', art=False):
"""Markup data with a specific format type (xml|tei|lmf)
:return str: balised element
"""
if typ == 'xml':
if markup in components['xml']['metalexContent'] or markup \
in components['xml']['metalexMetadata']:
if art:
element = self.chevron(markup, attr, art=True)+element+self.chevron(markup, attr, False)
return element
else:
element = self.chevron(markup, attr)+element+self.chevron(markup, attr, False)
return element
if typ == 'tei':
if markup in components['tei']['text'] or markup in components['tei']['teiHeader']:
if art:
element = self.chevron(markup, attr, art=True)+element+self.chevron(markup, attr, False)
return element
else:
element = self.chevron(markup, attr)+element+self.chevron(markup, attr, False)
return element
if typ == 'lmf':
if markup in components['lmf']['GlobalInformation'] \
or components['lmf']['Lexicon']:
if sclose:
element = self.chevron(markup, attr, True, sclose=True)
return element
else:
element = self.chevron(markup, attr)+element+self.chevron(markup, attr, False)
return element
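    # Illustrative sketch (added for clarity, not in the original source): with the
    # default typ='xml', no attributes and a markup name known to `components`,
    # balise() simply wraps the element, e.g.
    #     BaliseXML().balise('dog', 'entry')   # -> '<entry>dog</entry>'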
def chevron(self, el, attr, openchev=True, art=False, sclose=False):
"""Put tag around the data of element
:return str: tagging element
"""
idart = generate_id()
if art and attr == None:
if openchev : return "<"+el+" id='"+idart+"' class='data-entry'"+">"
if not openchev: return "</"+el+">"
if sclose : return "<"+el+" id='"+idart+"'/>"
if art and attr != None:
allattrib = ''
for at in attr.keys():
allattrib += ' '+at+'="'+attr[at]+'"'
if openchev and not sclose : return "<"+el+" id='"+idart+"' class='data-entry'"+' '+allattrib+">"
if openchev and sclose: return "<"+el+" id='"+idart+"' class='data-entry'"+' '+allattrib+"/>"
if not openchev: return "</"+el+">"
elif art == False and attr != None:
#print openchev
allattrib = ''
for at in attr.keys(): allattrib += ' '+at+'="'+attr[at]+'"'
if openchev and not sclose: return "<"+el+' '+allattrib+">"
if openchev and sclose: return "<"+el+' '+allattrib+"/>"
if not openchev: return "</"+el+">"
elif art == False and attr == None:
if openchev : return "<"+el+">"
if sclose : return "<"+el+"/>"
if not openchev: return "</"+el+">"<|fim▁end|> | return "*"+name+"* dictionary articles formated in xml is"+\ |
<|file_name|>xw_reporting.py<|end_file_name|><|fim▁begin|>import warnings
from pathlib import Path
from typing import Union
try:
import xlwings as xw
except ImportError:
xw = None
from larray.util.misc import _positive_integer
from larray.core.group import _translate_sheet_name
from larray.core.array import asarray, zip_array_items
from larray.example import load_example_data, EXAMPLE_EXCEL_TEMPLATES_DIR
_default_items_size = {}
def _validate_template_filename(filename: Union[str, Path]) -> Path:
if isinstance(filename, str):
filename = Path(filename)
suffix = filename.suffix
if not suffix:
suffix = '.crtx'
if suffix != '.crtx':
raise ValueError(f"Extension for the excel template file must be '.crtx' instead of {suffix}")
return filename.with_suffix(suffix)
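# Illustrative sketch (added for clarity, not part of the original module): the helper
# above normalizes template names before they are looked up on disk, e.g.
#     _validate_template_filename('Line')        # -> Path('Line.crtx')
#     _validate_template_filename('Line.crtx')   # -> Path('Line.crtx')
#     _validate_template_filename('Line.xlsx')   # raises ValueError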
class AbstractReportItem:
def __init__(self, template_dir=None, template=None, graphs_per_row=1):
self.template_dir = template_dir
self.template = template
self.default_items_size = _default_items_size.copy()
self.graphs_per_row = graphs_per_row
@property
def template_dir(self):
r"""
Set the path to the directory containing the Excel template files (with '.crtx' extension).
This method is mainly useful if your template files are located in several directories,
        otherwise pass the template directory directly to the ExcelReport constructor.
Parameters
----------
template_dir : str or Path
Path to the directory containing the Excel template files.
See Also
--------
set_graph_template
Examples
--------
>>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)
>>> # ... add some graphs using template files from 'C:\excel_templates_dir'
>>> report.template_dir = r'C:\other_templates_dir' # doctest: +SKIP
>>> # ... add some graphs using template files from 'C:\other_templates_dir'
"""
return self._template_dir
@template_dir.setter
def template_dir(self, template_dir):
if template_dir is not None:
if isinstance(template_dir, str):
template_dir = Path(template_dir)
if not isinstance(template_dir, Path):
raise TypeError(f"Expected a string or a pathlib.Path object. "
f"Got an object of type {type(template_dir).__name__} instead.")
if not template_dir.is_dir():
raise ValueError(f"The directory {template_dir} could not be found.")
self._template_dir = template_dir
@property
def template(self):
r"""
Set a default Excel template file.
Parameters
----------
template : str or Path
Name of the template to be used as default template.
The extension '.crtx' will be added if not given.
The full path to the template file must be given if no template directory has been set.
Examples
--------
>>> demo = load_example_data('demography_eurostat')
Passing the name of the template (only if a template directory has been set)
>>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)
>>> report.template = 'Line'
>>> sheet_population = report.new_sheet('Population')
>>> sheet_population.add_graph(demo.population['Belgium'],'Belgium')
Passing the full path of the template file
>>> # if no default template directory has been set
>>> # or if the new template is located in another directory,
>>> # you must provide the full path
>>> sheet_population.template = r'C:\other_templates_dir\Line_Marker.crtx' # doctest: +SKIP
>>> sheet_population.add_graph(demo.population['Germany'],'Germany') # doctest: +SKIP
"""
return self._template
@template.setter
def template(self, template):
if template is not None:
if self.template_dir is None:
raise RuntimeError("Please set 'template_dir' first")
filename = _validate_template_filename(template)
template = self.template_dir / filename
self._template = template
def set_item_default_size(self, kind, width=None, height=None):
r"""
Override the default 'width' and 'height' values for the given kind of item.
A new value must be provided at least for 'width' or 'height'.
Parameters
----------
kind : str
kind of item for which default values of 'width' and/or 'height' are modified.
Currently available kinds are 'title' and 'graph'.
width : int, optional
new default width value.
height : int, optional
new default height value.
Examples
--------
>>> report = ExcelReport()
>>> report.set_item_default_size('graph', width=450, height=250)
"""
if width is None and height is None:
raise ValueError("No value provided for both 'width' and 'heigth'. "
"Please provide one for at least 'width' or 'heigth'")
if kind not in self.default_items_size:
item_types = sorted(self.default_items_size.keys())
raise ValueError(f"Item type {kind} is not registered. Please choose in list {item_types}")
if width is None:
width = self.default_items_size[kind].width
if height is None:
height = self.default_items_size[kind].height
self.default_items_size[kind] = ItemSize(width, height)
@property
def graphs_per_row(self):
r"""
Default number of graphs per row.
Parameters
----------
graphs_per_row: int
See Also
--------
ReportSheet.newline
"""
return self._graphs_per_row
@graphs_per_row.setter
def graphs_per_row(self, graphs_per_row):
_positive_integer(graphs_per_row)
self._graphs_per_row = graphs_per_row
class AbstractReportSheet(AbstractReportItem):
r"""
    Represents a sheet dedicated to containing only graphical items (title banners, graphs).
See :py:obj:`ExcelReport` for use cases.
Parameters
----------
template_dir : str or Path, optional
Path to the directory containing the Excel template files (with a '.crtx' extension).
Defaults to None.
template : str or Path, optional
Name of the template to be used as default template.
The extension '.crtx' will be added if not given.
The full path to the template file must be given if no template directory has been set.
Defaults to None.
graphs_per_row : int, optional
Default number of graphs per row. Defaults to 1.
See Also
--------
ExcelReport
"""
def add_title(self, title, width=None, height=None, fontsize=11):
r"""
Add a title item to the current sheet.
        Note that the current method only adds a new item to the list of items to be generated.
        The report Excel file is generated only when :py:obj:`~ExcelReport.to_excel` is called.
Parameters
----------
title : str
Text to write in the title item.
width : int, optional
width of the title item. The current default value is used if None
(see :py:obj:`~ExcelReport.set_item_default_size`). Defaults to None.
height : int, optional
height of the title item. The current default value is used if None
(see :py:obj:`~ExcelReport.set_item_default_size`). Defaults to None.
fontsize : int, optional
fontsize of the displayed text. Defaults to 11.
Examples
--------
>>> report = ExcelReport()
>>> first_sheet = report.new_sheet('First_sheet')
>>> first_sheet.add_title('Title banner with default width, height and fontsize')
>>> first_sheet.add_title('Larger title banner', width=1200, height=100)
>>> first_sheet.add_title('Bigger fontsize', fontsize=13)
>>> # do not forget to call 'to_excel' to create the report file
>>> report.to_excel('Report.xlsx')
"""
pass
def add_graph(self, data, title=None, template=None, width=None, height=None, min_y=None, max_y=None,
xticks_spacing=None, customize_func=None, customize_kwargs=None):
r"""
Add a graph item to the current sheet.
        Note that the current method only adds a new item to the list of items to be generated.
        The report Excel file is generated only when :py:obj:`~ExcelReport.to_excel` is called.
Parameters
----------
data : 1D or 2D array-like
1D or 2D array representing the data associated with the graph.
The first row represents the abscissa labels.
Each additional row represents a new series and must start with the name of the current series.
title : str, optional
title of the graph. Defaults to None.
template : str or Path, optional
name of the template to be used to generate the graph.
            The full path to the template file must be provided if no template directory has been set
or if the template file belongs to another directory.
Defaults to the defined template (see :py:obj:`~ExcelReport.set_graph_template`).
width : int, optional
width of the title item. The current default value is used if None
(see :py:obj:`~ExcelReport.set_item_default_size`). Defaults to None.
height : int, optional
height of the title item. The current default value is used if None
(see :py:obj:`~ExcelReport.set_item_default_size`). Defaults to None.
min_y: int, optional
minimum value for the Y axis.
max_y: int, optional
maximum value for the Y axis.
xticks_spacing: int, optional
space interval between two ticks along the X axis.
customize_func: function, optional
user defined function to personalize the graph.
The function must take the Chart object as first argument.
All keyword arguments defined in customize_kwargs are passed to the function at call.
customize_kwargs: dict, optional
keywords arguments passed to the function `customize_func` at call.
Examples
--------
>>> demo = load_example_data('demography_eurostat')
>>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)
>>> sheet_be = report.new_sheet('Belgium')
Specifying the 'template'
>>> sheet_be.add_graph(demo.population['Belgium'], 'Population', template='Line')
Specifying the 'template', 'width' and 'height' values
>>> sheet_be.add_graph(demo.births['Belgium'], 'Births', template='Line', width=450, height=250)
Setting a default template
>>> sheet_be.template = 'Line_Marker'
>>> sheet_be.add_graph(demo.deaths['Belgium'], 'Deaths')
        Specify the minimum and maximum values for the Y axis
>>> sheet_be.add_graph(demo.population['Belgium'], 'Population (min/max Y axis = 5/6 millions)', min_y=5e6, max_y=6e6)
Specify the interval between two ticks (X axis)
>>> sheet_be.add_graph(demo.population['Belgium'], 'Population (every 2 years)', xticks_spacing=2)
Dumping the report Excel file
>>> # do not forget to call 'to_excel' to create the report file
>>> report.to_excel('Demography_Report.xlsx')
"""
pass
def add_graphs(self, array_per_title, axis_per_loop_variable, template=None, width=None, height=None,
graphs_per_row=1, min_y=None, max_y=None, xticks_spacing=None, customize_func=None,
customize_kwargs=None):
r"""
Add multiple graph items to the current sheet. This method is mainly useful when multiple
graphs are generated by iterating over one or several axes of an array (see examples below).
        The report Excel file is generated only when :py:obj:`~ExcelReport.to_excel` is called.
Parameters
----------
array_per_title: dict
dictionary containing pairs (title template, array).
axis_per_loop_variable: dict
dictionary containing pairs (variable used in the title template, axis).
template : str or Path, optional
name of the template to be used to generate the graph.
            The full path to the template file must be provided if no template directory has been set
or if the template file belongs to another directory.
Defaults to the defined template (see :py:obj:`~ExcelReport.set_graph_template`).
width : int, optional
width of the title item. The current default value is used if None
(see :py:obj:`~ExcelReport.set_item_default_size`). Defaults to None.
height : int, optional
height of the title item. The current default value is used if None
(see :py:obj:`~ExcelReport.set_item_default_size`). Defaults to None.
graphs_per_row: int, optional
Number of graphs per row. Defaults to 1.
min_y: int, optional
minimum value for the Y axis.
max_y: int, optional
maximum value for the Y axis.
xticks_spacing: int, optional
space interval between two ticks along the X axis.
customize_func: function, optional
user defined function to personalize the graph.
The function must take the Chart object as first argument.
All keyword arguments defined in customize_kwargs are passed to the function at call.
customize_kwargs: dict, optional
keywords arguments passed to the function `customize_func` at call.
Examples
--------
>>> demo = load_example_data('demography_eurostat')
>>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)
>>> sheet_population = report.new_sheet('Population')
>>> population = demo.population
Generate a new graph for each combination of gender and year
>>> sheet_population.add_graphs(
... {'Population of {gender} by country in {year}': population},
... {'gender': population.gender, 'year': population.time},
... template='line', width=450, height=250, graphs_per_row=2)
        Specify the minimum and maximum values for the Y axis
>>> sheet_population.add_graphs({'Population of {gender} by country for the year {year}': population},
... {'gender': population.gender, 'year': population.time},
... template='line', width=450, height=250, graphs_per_row=2, min_y=0, max_y=50e6)
Specify the interval between two ticks (X axis)
>>> sheet_population.add_graphs({'Population of {gender} by country for the year {year}': population},
... {'gender': population.gender, 'year': population.time},
... template='line', width=450, height=250, graphs_per_row=2, xticks_spacing=2)
>>> # do not forget to call 'to_excel' to create the report file
>>> report.to_excel('Demography_Report.xlsx')
"""
pass
def newline(self):
r"""
Force a new row of graphs.
"""
pass
class AbstractExcelReport(AbstractReportItem):
r"""
Automate the generation of multiple graphs in an Excel file.
The ExcelReport instance is initially populated with information
(data, title, destination sheet, template, size) required to create the graphs.
Once all information has been provided, the :py:obj:`~ExcelReport.to_excel` method
is called to generate an Excel file with all graphs in one step.
Parameters
----------
template_dir : str or Path, optional
Path to the directory containing the Excel template files (with a '.crtx' extension).
Defaults to None.
template : str or Path, optional
Name of the template to be used as default template.
The extension '.crtx' will be added if not given.
The full path to the template file must be given if no template directory has been set.
Defaults to None.
graphs_per_row: int, optional
Default number of graphs per row.
Defaults to 1.
Notes
-----
The data associated with all graphical items is dumped in the same sheet named '__data__'.
Examples
--------
>>> demo = load_example_data('demography_eurostat')
>>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)
Set a new destination sheet
>>> sheet_be = report.new_sheet('Belgium')
Add a new title item
>>> sheet_be.add_title('Population, births and deaths')
    Add a new graph item (each new graph is placed to the right of the previous one unless you use newline() or add_title())
>>> # using default 'width' and 'height' values
>>> sheet_be.add_graph(demo.population['Belgium'], 'Population', template='Line')
>>> # specifying the 'width' and 'height' values
>>> sheet_be.add_graph(demo.births['Belgium'], 'Births', template='Line', width=450, height=250)
Override the default 'width' and 'height' values for graphs
>>> sheet_be.set_item_default_size('graph', width=450, height=250)
>>> # add a new graph with the new default 'width' and 'height' values
>>> sheet_be.add_graph(demo.deaths['Belgium'], 'Deaths')
Set a default template for all next graphs
>>> # if a default template directory has been set, just pass the name
>>> sheet_be.template = 'Line'
>>> # otherwise, give the full path to the template file
>>> sheet_be.template = r'C:\other_template_dir\Line_Marker.crtx' # doctest: +SKIP
>>> # add a new graph with the default template
>>> sheet_be.add_graph(demo.population['Belgium', 'Female'], 'Population - Female')
>>> sheet_be.add_graph(demo.population['Belgium', 'Male'], 'Population - Male')
Specify the number of graphs per row
>>> sheet_countries = report.new_sheet('All countries')
>>> sheet_countries.graphs_per_row = 2
>>> for combined_labels, subset in demo.population.items(('time', 'gender')):
... title = ' - '.join([str(label) for label in combined_labels])
... sheet_countries.add_graph(subset, title)
Force a new row of graphs
>>> sheet_countries.newline()
Add multiple graphs at once (add a new graph for each combination of gender and year)
>>> sheet_countries.add_graphs({'Population of {gender} by country in {year}': population},
... {'gender': population.gender, 'year': population.time},
... template='line', width=450, height=250, graphs_per_row=2)
Generate the report Excel file
>>> report.to_excel('Demography_Report.xlsx')
"""
def new_sheet(self, sheet_name):
r"""
Add a new empty output sheet.
        This sheet will contain only graphical elements; all data are exported
to a dedicated separate sheet.
Parameters
----------
sheet_name : str
name of the current sheet.
Returns
-------
sheet: ReportSheet
Examples
--------
>>> demo = load_example_data('demography_eurostat')
>>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)
>>> # prepare new output sheet named 'Belgium'
>>> sheet_be = report.new_sheet('Belgium')
>>> # add graph to the output sheet 'Belgium'
>>> sheet_be.add_graph(demo.population['Belgium'], 'Population', template='Line')
"""
pass
def sheet_names(self):
r"""
Returns the names of the output sheets.
Examples
--------
>>> report = ExcelReport()
>>> sheet_population = report.new_sheet('Pop')
>>> sheet_births = report.new_sheet('Births')
>>> sheet_deaths = report.new_sheet('Deaths')
>>> report.sheet_names()
['Pop', 'Births', 'Deaths']
"""
pass
def to_excel(self, filepath, data_sheet_name='__data__', overwrite=True):
r"""
Generate the report Excel file.
Parameters
----------
filepath : str or Path
Path of the report file for the dump.
data_sheet_name : str, optional
name of the Excel sheet where all data associated with items is dumped.
Defaults to '__data__'.
overwrite : bool, optional
whether to overwrite an existing report file.
Defaults to True.
Examples
--------
>>> demo = load_example_data('demography_eurostat')
>>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)
>>> report.template = 'Line_Marker'
>>> for c in demo.country:
... sheet_country = report.new_sheet(c)
... sheet_country.add_graph(demo.population[c], 'Population')
... sheet_country.add_graph(demo.births[c], 'Births')
... sheet_country.add_graph(demo.deaths[c], 'Deaths')
Basic usage
>>> report.to_excel('Demography_Report.xlsx')
Alternative data sheet name
>>> report.to_excel('Demography_Report.xlsx', data_sheet_name='Data Tables') # doctest: +SKIP
        Check if the output file already exists
>>> report.to_excel('Demography_Report.xlsx', overwrite=False) # doctest: +SKIP
Traceback (most recent call last):
...
ValueError: Sheet named 'Belgium' already present in workbook
"""
pass
if xw is not None:
from xlwings.constants import LegendPosition, HAlign, VAlign, ChartType, RowCol, AxisType, Constants
from larray.inout.xw_excel import open_excel
class ItemSize:
def __init__(self, width, height):
self.width = width
self.height = height
@property
def width(self):
return self._width
@width.setter
def width(self, width):
_positive_integer(width)
self._width = width
@property
def height(self):
return self._height
@height.setter
def height(self, height):
_positive_integer(height)
self._height = height
class ExcelTitleItem(ItemSize):
_default_size = ItemSize(1000, 50)
def __init__(self, text, fontsize, top, left, width, height):
ItemSize.__init__(self, width, height)
self.top = top
self.left = left
self.text = str(text)
_positive_integer(fontsize)
self.fontsize = fontsize
def dump(self, sheet, data_sheet, row):
data_cells = data_sheet.Cells
# add title in data sheet
data_cells(row, 1).Value = self.text
# generate title banner in destination sheet
msoShapeRectangle = 1
msoThemeColorBackground1 = 14
sheet_shapes = sheet.Shapes
shp = sheet_shapes.AddShape(Type=msoShapeRectangle, Left=self.left, Top=self.top,
Width=self.width, Height=self.height)
fill = shp.Fill
fill.ForeColor.ObjectThemeColor = msoThemeColorBackground1
fill.Solid()
shp.Line.Visible = False
frame = shp.TextFrame
chars = frame.Characters()
chars.Text = self.text
font = chars.Font
font.Color = 1
font.Bold = True
font.Size = self.fontsize
frame.HorizontalAlignment = HAlign.xlHAlignLeft
frame.VerticalAlignment = VAlign.xlVAlignCenter
shp.SetShapesDefaultProperties()
# update and return current row position in data sheet (+1 for title +1 for blank line)
return row + 2
_default_items_size['title'] = ExcelTitleItem._default_size
class ExcelGraphItem(ItemSize):
_default_size = ItemSize(427, 230)
def __init__(self, data, title, template, top, left, width, height, min_y, max_y,
xticks_spacing, customize_func, customize_kwargs):
ItemSize.__init__(self, width, height)
self.top = top
self.left = left
self.title = str(title) if title is not None else None
data = asarray(data)
if not (1 <= data.ndim <= 2):
raise ValueError(f"Expected 1D or 2D array for data argument. Got array of dimensions {data.ndim}")
self.data = data
if template is not None:
template = Path(template)
if not template.is_file():
raise ValueError(f"Could not find template file {template}")
self.template = template
self.min_y = min_y
self.max_y = max_y
self.xticks_spacing = xticks_spacing
if customize_func is not None and not callable(customize_func):
raise TypeError(f"Expected a function for the argument 'customize_func'. "
f"Got object of type {type(customize_func).__name__} instead.")
self.customize_func = customize_func
self.customize_kwargs = customize_kwargs
def dump(self, sheet, data_sheet, row):
data_range = data_sheet.Range
data_cells = data_sheet.Cells
# write graph title in data sheet
data_cells(row, 1).Value = self.title
row += 1
# dump data to make the graph in data sheet
data = self.data
nb_series = 1 if data.ndim == 1 else data.shape[0]
nb_xticks = data.size if data.ndim == 1 else data.shape[1]
last_row, last_col = row + nb_series, nb_xticks + 1
data_range(data_cells(row, 1), data_cells(last_row, last_col)).Value = data.dump(na_repr=None)
data_cells(row, 1).Value = ''
# generate graph in destination sheet
sheet_charts = sheet.ChartObjects()
obj = sheet_charts.Add(self.left, self.top, self.width, self.height)
obj_chart = obj.Chart
source = data_range(data_cells(row, 1), data_cells(last_row, last_col))
obj_chart.SetSourceData(source)
obj_chart.ChartType = ChartType.xlLine
# title
if self.title is not None:
obj_chart.HasTitle = True
obj_chart.ChartTitle.Caption = self.title
# legend
obj_chart.Legend.Position = LegendPosition.xlLegendPositionBottom
# template
if self.template is not None:
obj_chart.ApplyChartTemplate(self.template)
# min - max on Y axis
if self.min_y is not None:
obj_chart.Axes(AxisType.xlValue).MinimumScale = self.min_y
if self.max_y is not None:
obj_chart.Axes(AxisType.xlValue).MaximumScale = self.max_y
# xticks_spacing
if self.xticks_spacing is not None:
obj_chart.Axes(AxisType.xlCategory).TickLabelSpacing = self.xticks_spacing
obj_chart.Axes(AxisType.xlCategory).TickMarkSpacing = self.xticks_spacing
obj_chart.Axes(AxisType.xlCategory).TickLabelPosition = Constants.xlLow
# user's function (to apply on remaining kwargs)
if self.customize_func is not None:
self.customize_func(obj_chart, **self.customize_kwargs)
            # flip plot orientation: with several series but a single category, plot by rows
if nb_series > 1 and nb_xticks == 1:
obj_chart.PlotBy = RowCol.xlRows
# update and return current row position
return row + nb_series + 2
_default_items_size['graph'] = ExcelGraphItem._default_size
class ReportSheet(AbstractReportSheet):
def __init__(self, excel_report, name, template_dir=None, template=None, graphs_per_row=1):
name = _translate_sheet_name(name)
self.excel_report = excel_report
self.name = name
self.items = []
self.top = 0
self.left = 0
self.position_in_row = 1
self.curline_height = 0
if template_dir is None:
template_dir = excel_report.template_dir
if template is None:
template = excel_report.template
AbstractReportSheet.__init__(self, template_dir, template, graphs_per_row)
def add_title(self, title, width=None, height=None, fontsize=11):
if width is None:
width = self.default_items_size['title'].width
if height is None:
height = self.default_items_size['title'].height
self.newline()
self.items.append(ExcelTitleItem(title, fontsize, self.top, 0, width, height))
self.top += height
def add_graph(self, data, title=None, template=None, width=None, height=None, min_y=None, max_y=None,
xticks_spacing=None, customize_func=None, customize_kwargs=None):
if width is None:
width = self.default_items_size['graph'].width
if height is None:
height = self.default_items_size['graph'].height
if template is not None:
self.template = template
template = self.template
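            # wrap to a new row of graphs once the configured number per row is reached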
if self.graphs_per_row is not None and self.position_in_row > self.graphs_per_row:
self.newline()
self.items.append(ExcelGraphItem(data, title, template, self.top, self.left, width, height,
min_y, max_y, xticks_spacing, customize_func, customize_kwargs))
self.left += width
self.curline_height = max(self.curline_height, height)
self.position_in_row += 1
def add_graphs(self, array_per_title, axis_per_loop_variable, template=None, width=None, height=None,
graphs_per_row=1, min_y=None, max_y=None, xticks_spacing=None, customize_func=None,
customize_kwargs=None):
loop_variable_names = axis_per_loop_variable.keys()
axes = tuple(axis_per_loop_variable.values())
titles = array_per_title.keys()
arrays = array_per_title.values()
if graphs_per_row is not None:
previous_graphs_per_row = self.graphs_per_row<|fim▁hole|> if self.position_in_row > 1:
self.newline()
for loop_variable_values, arrays_chunk in zip_array_items(arrays, axes=axes):
loop_variables_dict = dict(zip(loop_variable_names, loop_variable_values))
for title_template, array_chunk in zip(titles, arrays_chunk):
title = title_template.format(**loop_variables_dict)
self.add_graph(array_chunk, title, template, width, height, min_y, max_y, xticks_spacing,
customize_func, customize_kwargs)
if graphs_per_row is not None:
self.graphs_per_row = previous_graphs_per_row
def newline(self):
self.top += self.curline_height
self.curline_height = 0
self.left = 0
self.position_in_row = 1
def _to_excel(self, workbook, data_row):
# use first sheet as data sheet
data_sheet = workbook.Worksheets(1)
data_cells = data_sheet.Cells
# write destination sheet name in data sheet
data_cells(data_row, 1).Value = self.name
data_row += 2
# create new empty sheet in workbook (will contain output graphical items)
# Hack, since just specifying "After" is broken in certain environments
# see: https://stackoverflow.com/questions/40179804/adding-excel-sheets-to-end-of-workbook
dest_sheet = workbook.Worksheets.Add(Before=None, After=workbook.Sheets(workbook.Sheets.Count))
dest_sheet.Name = self.name
# for each item, dump data + generate associated graphical items
for item in self.items:
data_row = item.dump(dest_sheet, data_sheet, data_row)
# reset
self.top = 0
self.left = 0
self.curline_height = 0
# return current row in data sheet
return data_row
# TODO : add a new section about this class in the tutorial
class ExcelReport(AbstractExcelReport):
def __init__(self, template_dir=None, template=None, graphs_per_row=1):
AbstractExcelReport.__init__(self, template_dir, template, graphs_per_row)
self.sheets = {}
def sheet_names(self):
return [sheet_name for sheet_name in self.sheets.keys()]
def __getitem__(self, key):
return self.sheets[key]
# TODO : Do not implement __setitem__ and move code below to new_sheet()?
def __setitem__(self, key, value, warn_stacklevel=2):
if not isinstance(value, ReportSheet):
raise ValueError(f"Expected ReportSheet object. Got {type(value).__name__} object instead.")
if key in self.sheet_names():
warnings.warn(f"Sheet '{key}' already exists in the report and will be reset",
stacklevel=warn_stacklevel)
self.sheets[key] = value
def __delitem__(self, key):
del self.sheets[key]
def __repr__(self):
return f'sheets: {self.sheet_names()}'
def new_sheet(self, sheet_name):
sheet = ReportSheet(self, sheet_name, self.template_dir, self.template, self.graphs_per_row)
self.__setitem__(sheet_name, sheet, warn_stacklevel=3)
return sheet
def to_excel(self, filepath, data_sheet_name='__data__', overwrite=True):
with open_excel(filepath, overwrite_file=overwrite) as wb:
# from here on, we use pure win32com objects instead of
# larray.excel or xlwings objects as this is faster
xl_wb = wb.api
# rename first sheet
xl_wb.Worksheets(1).Name = data_sheet_name
# dump items for each output sheet
data_sheet_row = 1
for sheet in self.sheets.values():
data_sheet_row = sheet._to_excel(xl_wb, data_sheet_row)
wb.save()
# reset
self.sheets.clear()
else:
class ReportSheet(AbstractReportSheet):
def __init__(self):
raise Exception("ReportSheet class cannot be instantiated because xlwings is not installed")
class ExcelReport(AbstractExcelReport):
def __init__(self):
raise Exception("ExcelReport class cannot be instantiated because xlwings is not installed")
ExcelReport.__doc__ = AbstractExcelReport.__doc__
ReportSheet.__doc__ = AbstractReportSheet.__doc__<|fim▁end|> | self.graphs_per_row = graphs_per_row |