File: app.js

var express = require('express');
var path = require('path');
var favicon = require('serve-favicon');
var logger = require('morgan');
var cookieParser = require('cookie-parser');
var bodyParser = require('body-parser');
var api = require('./routes/api');
var app = express();
// view engine setup
app.set('views', path.join(__dirname, 'views'));
app.set('view engine', 'jade');
// uncomment after placing your favicon in /public
//app.use(favicon(path.join(__dirname, 'public', 'favicon.ico')));
app.use(logger('dev'));
app.use(bodyParser.json());
app.use(bodyParser.urlencoded({ extended: false }));
app.use(cookieParser());
app.use(express.static(path.join(__dirname, 'public')));
app.use(express.static(__dirname + '/dist'));
app.use('/api', api);
app.use('*', function(req, res) {
res.sendFile(__dirname + '/dist/index.html'); // sendfile is deprecated in Express 4; sendFile requires an absolute path
});
// catch 404 and forward to error handler
app.use(function(req, res, next) {
var err = new Error('Not Found');
err.status = 404;
next(err);
});
// error handlers
// development error handler
// will print stacktrace
if (app.get('env') === 'development') {
app.use(function(err, req, res, next) {
res.status(err.status || 500);
res.render('error', {
message: err.message,
error: err
});
});
}
// production error handler
// no stacktraces leaked to user
app.use(function(err, req, res, next) {
res.status(err.status || 500);
res.render('error', {
message: err.message,
error: {}
});
});
module.exports = app;
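
For context, a minimal sketch of how an app module like this is usually started; the bin/www entry point and default port below are assumptions, not part of the original file:

// bin/www (hypothetical entry point)
var app = require('../app');
var port = process.env.PORT || 3000; // assumed default
app.listen(port, function() {
  console.log('Express server listening on port ' + port);
});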

File: tags_test.go

package docker
import (
"testing"
)
func TestLookupImage(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
if img, err := runtime.repositories.LookupImage(unitTestImageName); err != nil {
t.Fatal(err)
} else if img == nil {
t.Errorf("Expected 1 image, none found")
}
if img, err := runtime.repositories.LookupImage(unitTestImageName + ":" + DEFAULTTAG); err != nil {
t.Fatal(err)
} else if img == nil {
t.Errorf("Expected 1 image, none found")
}
if img, err := runtime.repositories.LookupImage(unitTestImageName + ":" + "fail"); err == nil {
t.Errorf("Expected error, none found")
} else if img != nil {
t.Errorf("Expected 0 image, 1 found")
}
if img, err := runtime.repositories.LookupImage("fail:fail"); err == nil {
t.Errorf("Expected error, none found")
} else if img != nil {
t.Errorf("Expected 0 image, 1 found")
}
if img, err := runtime.repositories.LookupImage(unitTestImageId); err != nil {
t.Fatal(err)
} else if img == nil {
t.Errorf("Expected 1 image, none found")
}
if img, err := runtime.repositories.LookupImage(unitTestImageName + ":" + unitTestImageId); err != nil {
t.Fatal(err)
} else if img == nil {
t.Errorf("Expected 1 image, none found")
}
}

File: test_volume_boot_pattern.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import testtools
from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.scenario import manager
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestVolumeBootPattern(manager.EncryptionScenarioTest):
# Boot from volume scenario is quite slow, and needs extra
# breathing room to get through deletes in the time allotted.
TIMEOUT_SCALING_FACTOR = 2
@classmethod
def skip_checks(cls):
super(TestVolumeBootPattern, cls).skip_checks()
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder volume snapshots are disabled")
def _create_volume_from_image(self):
img_uuid = CONF.compute.image_ref
vol_name = data_utils.rand_name(
self.__class__.__name__ + '-volume-origin')
return self.create_volume(name=vol_name, imageRef=img_uuid)
def _get_bdm(self, source_id, source_type, delete_on_termination=False):
bd_map_v2 = [{
'uuid': source_id,
'source_type': source_type,
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': delete_on_termination}]
return {'block_device_mapping_v2': bd_map_v2}
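# For illustration (not part of the original test): a call like
# self._get_bdm('some-volume-id', 'volume') returns
# {'block_device_mapping_v2': [{'uuid': 'some-volume-id',
#   'source_type': 'volume', 'destination_type': 'volume',
#   'boot_index': 0, 'delete_on_termination': False}]}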
def _boot_instance_from_resource(self, source_id,
source_type,
keypair=None,
security_group=None,
delete_on_termination=False):
create_kwargs = dict()
if keypair:
create_kwargs['key_name'] = keypair['name']
if security_group:
create_kwargs['security_groups'] = [
{'name': security_group['name']}]
create_kwargs.update(self._get_bdm(
source_id,
source_type,
delete_on_termination=delete_on_termination))
return self.create_server(image_id='', **create_kwargs)
def _delete_server(self, server):
self.servers_client.delete_server(server['id'])
waiters.wait_for_server_termination(self.servers_client, server['id'])
@decorators.idempotent_id('557cd2c2-4eb8-4dce-98be-f86765ff311b')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
@utils.services('compute', 'volume', 'image')
def test_volume_boot_pattern(self):
"""This test case attempts to reproduce the following steps:
* Create in Cinder some bootable volume importing a Glance image
* Boot an instance from the bootable volume
* Write content to the volume
* Delete the instance and boot a new instance from the volume
* Check written content in the instance
* Create a volume snapshot while the instance is running
* Boot an additional instance from the new snapshot based volume
* Check written content in the instance booted from snapshot
"""
LOG.info("Creating keypair and security group")
keypair = self.create_keypair()
security_group = self._create_security_group()
# create an instance from volume
LOG.info("Booting instance 1 from volume")
volume_origin = self._create_volume_from_image()
instance_1st = self._boot_instance_from_resource(
source_id=volume_origin['id'],
source_type='volume',
keypair=keypair,
security_group=security_group)
LOG.info("Booted first instance: %s", instance_1st)
# write content to volume on instance
LOG.info("Setting timestamp in instance %s", instance_1st)
ip_instance_1st = self.get_server_ip(instance_1st)
timestamp = self.create_timestamp(ip_instance_1st,
private_key=keypair['private_key'])
# delete instance
LOG.info("Deleting first instance: %s", instance_1st)
self._delete_server(instance_1st)
# create a 2nd instance from volume
instance_2nd = self._boot_instance_from_resource(
source_id=volume_origin['id'],
source_type='volume',
keypair=keypair,
security_group=security_group)
LOG.info("Booted second instance %s", instance_2nd)
# check the content of written file
LOG.info("Getting timestamp in instance %s", instance_2nd)
ip_instance_2nd = self.get_server_ip(instance_2nd)
timestamp2 = self.get_timestamp(ip_instance_2nd,
private_key=keypair['private_key'])
self.assertEqual(timestamp, timestamp2)
# snapshot a volume
LOG.info("Creating snapshot from volume: %s", volume_origin['id'])
snapshot = self.create_volume_snapshot(volume_origin['id'], force=True)
# create a 3rd instance from snapshot
LOG.info("Creating third instance from snapshot: %s", snapshot['id'])
volume = self.create_volume(snapshot_id=snapshot['id'],
size=snapshot['size'])
LOG.info("Booting third instance from snapshot")
server_from_snapshot = (
self._boot_instance_from_resource(source_id=volume['id'],
source_type='volume',
keypair=keypair,
security_group=security_group))
LOG.info("Booted third instance %s", server_from_snapshot)
# check the content of written file
LOG.info("Logging into third instance to get timestamp: %s",
server_from_snapshot)
server_from_snapshot_ip = self.get_server_ip(server_from_snapshot)
timestamp3 = self.get_timestamp(server_from_snapshot_ip,
private_key=keypair['private_key'])
self.assertEqual(timestamp, timestamp3)
@decorators.idempotent_id('05795fb2-b2a7-4c9f-8fac-ff25aedb1489')
@decorators.attr(type='slow')
@utils.services('compute', 'image', 'volume')
def test_create_server_from_volume_snapshot(self):
# Create a volume from an image
boot_volume = self._create_volume_from_image()
# Create a snapshot
boot_snapshot = self.create_volume_snapshot(boot_volume['id'])
# Create a server from a volume snapshot
server = self._boot_instance_from_resource(
source_id=boot_snapshot['id'],
source_type='snapshot',
delete_on_termination=True)
server_info = self.servers_client.show_server(server['id'])['server']
# The created volume when creating a server from a snapshot
created_volume = server_info['os-extended-volumes:volumes_attached']
self.assertNotEmpty(created_volume, "No volume attachment found.")
created_volume_info = self.volumes_client.show_volume(
created_volume[0]['id'])['volume']
# Verify the server was created from the snapshot
self.assertEqual(
boot_volume['volume_image_metadata']['image_id'],
created_volume_info['volume_image_metadata']['image_id'])
self.assertEqual(boot_snapshot['id'],
created_volume_info['snapshot_id'])
self.assertEqual(server['id'],
created_volume_info['attachments'][0]['server_id'])
self.assertEqual(created_volume[0]['id'],
created_volume_info['attachments'][0]['volume_id'])
@decorators.idempotent_id('36c34c67-7b54-4b59-b188-02a2f458a63b')
@utils.services('compute', 'volume', 'image')
def test_create_ebs_image_and_check_boot(self):
# create an instance from volume
volume_origin = self._create_volume_from_image()
instance = self._boot_instance_from_resource(
source_id=volume_origin['id'],
source_type='volume',
delete_on_termination=True)
# create EBS image
image = self.create_server_snapshot(instance)
# delete instance
self._delete_server(instance)
# boot instance from EBS image
instance = self.create_server(image_id=image['id'])
# just ensure that instance booted
# delete instance
self._delete_server(instance)
@decorators.idempotent_id('cb78919a-e553-4bab-b73b-10cf4d2eb125')
@testtools.skipUnless(CONF.compute_feature_enabled.attach_encrypted_volume,
'Encrypted volume attach is not supported')
@utils.services('compute', 'volume')
def test_boot_server_from_encrypted_volume_luks(self):
# Create an encrypted volume
volume = self.create_encrypted_volume('nova.volume.encryptors.'
'luks.LuksEncryptor',
volume_type='luks')
self.volumes_client.set_bootable_volume(volume['id'], bootable=True)
# Boot a server from the encrypted volume
server = self._boot_instance_from_resource(
source_id=volume['id'],
source_type='volume',
delete_on_termination=False)
server_info = self.servers_client.show_server(server['id'])['server']
created_volume = server_info['os-extended-volumes:volumes_attached']
self.assertEqual(volume['id'], created_volume[0]['id'])

File: custom_delay.rs

use std::{thread, time};

use clerk::Delay;

pub struct CustomDelay;

impl Delay for CustomDelay {
fn delay_ns(ns: u16) {
thread::sleep(time::Duration::new(0, u32::from(ns)));
}
}
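
A short usage sketch; clerk normally invokes the trait method internally from its display driver, so the direct call below is only a hedged illustration that the bound is satisfied:

// Hypothetical caller: delay_ns takes no self, so it is an associated function.
fn sleep_briefly() {
    <CustomDelay as Delay>::delay_ns(500); // ~500 ns, subject to OS timer resolution
}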

File: BoundNodeCallbackObservable.ts

import {Observable} from '../Observable';
import {Subscriber} from '../Subscriber';
import {Subscription} from '../Subscription';
import {Scheduler} from '../Scheduler';
import {tryCatch} from '../util/tryCatch';
import {errorObject} from '../util/errorObject';
import {AsyncSubject} from '../AsyncSubject';
/**
* We need this JSDoc comment for affecting ESDoc.
* @extends {Ignored}
* @hide true
*/
export class BoundNodeCallbackObservable<T> extends Observable<T> {
subject: AsyncSubject<T>;
/* tslint:disable:max-line-length */
static create<R>(callbackFunc: (callback: (err: any, result: R) => any) => any, selector?: void, scheduler?: Scheduler): () => Observable<R>;
static create<T, R>(callbackFunc: (v1: T, callback: (err: any, result: R) => any) => any, selector?: void, scheduler?: Scheduler): (v1: T) => Observable<R>;
static create<T, T2, R>(callbackFunc: (v1: T, v2: T2, callback: (err: any, result: R) => any) => any, selector?: void, scheduler?: Scheduler): (v1: T, v2: T2) => Observable<R>;
static create<T, T2, T3, R>(callbackFunc: (v1: T, v2: T2, v3: T3, callback: (err: any, result: R) => any) => any, selector?: void, scheduler?: Scheduler): (v1: T, v2: T2, v3: T3) => Observable<R>;
static create<T, T2, T3, T4, R>(callbackFunc: (v1: T, v2: T2, v3: T3, v4: T4, callback: (err: any, result: R) => any) => any, selector?: void, scheduler?: Scheduler): (v1: T, v2: T2, v3: T3, v4: T4) => Observable<R>;
static create<T, T2, T3, T4, T5, R>(callbackFunc: (v1: T, v2: T2, v3: T3, v4: T4, v5: T5, callback: (err: any, result: R) => any) => any, selector?: void, scheduler?: Scheduler): (v1: T, v2: T2, v3: T3, v4: T4, v5: T5) => Observable<R>;
static create<T, T2, T3, T4, T5, T6, R>(callbackFunc: (v1: T, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6, callback: (err: any, result: R) => any) => any, selector?: void, scheduler?: Scheduler): (v1: T, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6) => Observable<R>;
static create<T>(callbackFunc: Function, selector?: void, scheduler?: Scheduler): (...args: any[]) => Observable<T>;
static create<T>(callbackFunc: Function, selector?: (...args: any[]) => T, scheduler?: Scheduler): (...args: any[]) => Observable<T>;
/* tslint:enable:max-line-length */
/**
* Converts a Node.js-style callback API to a function that returns an
* Observable.
*
* <span class="informal">It's just like {@link bindCallback}, but the
* callback is expected to be of type `callback(error, result)`.</span>
*
* `bindNodeCallback` is not an operator because its input and output are not
* Observables. The input is a function `func` with some parameters, but the
* last parameter must be a callback function that `func` calls when it is
* done. The callback function is expected to follow Node.js conventions,
* where the first argument to the callback is an error, while remaining
* arguments are the callback result. The output of `bindNodeCallback` is a
* function that takes the same parameters as `func`, except the last one (the
* callback). When the output function is called with arguments, it will
* return an Observable where the results will be delivered to.
*
* @example <caption>Read a file from the filesystem and get the data as an Observable</caption>
* import * as fs from 'fs';
* var readFileAsObservable = Rx.Observable.bindNodeCallback(fs.readFile);
* var result = readFileAsObservable('./roadNames.txt', 'utf8');
* result.subscribe(x => console.log(x), e => console.error(e));
*
* @see {@link bindCallback}
* @see {@link from}
* @see {@link fromPromise}
*
* @param {function} func Function with a callback as the last parameter.
* @param {function} selector A function which takes the arguments from the
* callback and maps those to a value to emit on the output Observable.
* @param {Scheduler} [scheduler] The scheduler on which to schedule the
* callbacks.
* @return {function(...params: *): Observable} A function which returns the
* Observable that delivers the same values the Node.js callback would
* deliver.
* @static true
* @name bindNodeCallback
* @owner Observable
*/
static create<T>(func: Function,
selector: Function | void = undefined,
scheduler?: Scheduler): (...args: any[]) => Observable<T> {
return (...args: any[]): Observable<T> => {
return new BoundNodeCallbackObservable<T>(func, <any>selector, args, scheduler);
};
}
constructor(private callbackFunc: Function,
private selector: Function,
private args: any[],
public scheduler: Scheduler) {
super();
}
protected _subscribe(subscriber: Subscriber<T | T[]>): Subscription {
const callbackFunc = this.callbackFunc;
const args = this.args;
const scheduler = this.scheduler;
let subject = this.subject;
if (!scheduler) {
if (!subject) {
subject = this.subject = new AsyncSubject<T>();
const handler = function handlerFn(...innerArgs: any[]) {
const source = (<any>handlerFn).source;
const { selector, subject } = source;
const err = innerArgs.shift();
if (err) {
subject.error(err);
} else if (selector) {
const result = tryCatch(selector).apply(this, innerArgs);
if (result === errorObject) {
subject.error(errorObject.e);
} else {
subject.next(result);
subject.complete();
}
} else {
subject.next(innerArgs.length === 1 ? innerArgs[0] : innerArgs);
subject.complete();
}
};
// use named function instance to avoid closure.
(<any>handler).source = this;
const result = tryCatch(callbackFunc).apply(this, args.concat(handler));
if (result === errorObject) {
subject.error(errorObject.e);
}
}
return subject.subscribe(subscriber);
} else {
return scheduler.schedule(dispatch, 0, { source: this, subscriber });
}
}
}
function dispatch<T>(state: { source: BoundNodeCallbackObservable<T>, subscriber: Subscriber<T> }) {
const self = (<Subscription> this);
const { source, subscriber } = state;
// XXX: cast to `any` to access to the private field in `source`.
const { callbackFunc, args, scheduler } = source as any;
let subject = source.subject;
if (!subject) {
subject = source.subject = new AsyncSubject<T>();
const handler = function handlerFn(...innerArgs: any[]) {
const source = (<any>handlerFn).source;
const { selector, subject } = source;
const err = innerArgs.shift();
if (err) {
subject.error(err);
} else if (selector) {
const result = tryCatch(selector).apply(this, innerArgs);
if (result === errorObject) {
self.add(scheduler.schedule(dispatchError, 0, { err: errorObject.e, subject }));
} else {
self.add(scheduler.schedule(dispatchNext, 0, { value: result, subject }));
}
} else {
const value = innerArgs.length === 1 ? innerArgs[0] : innerArgs;
self.add(scheduler.schedule(dispatchNext, 0, { value, subject }));
}
};
// use named function to pass values in without closure
(<any>handler).source = source;
const result = tryCatch(callbackFunc).apply(this, args.concat(handler));
if (result === errorObject) {
subject.error(errorObject.e);
}
}
self.add(subject.subscribe(subscriber));
}
interface DispatchNextArg<T> {
subject: AsyncSubject<T>;
value: T;
}
function dispatchNext<T>(arg: DispatchNextArg<T>) {
const { value, subject } = arg;
subject.next(value);
subject.complete();
}
interface DispatchErrorArg<T> {
subject: AsyncSubject<T>;
err: any;
}
function dispatchError<T>(arg: DispatchErrorArg<T>) {
const { err, subject } = arg;
subject.error(err);
}
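
A usage sketch for the selector argument, complementing the fs.readFile example in the JSDoc above; the lookup callback API below is hypothetical:

// Hypothetical Node-style API: lookup(name, cb) with cb(err, address, family).
declare function lookup(name: string, cb: (err: any, address: string, family: number) => void): void;

const lookupAsObservable = BoundNodeCallbackObservable.create<string>(
  lookup,
  (address: string, family: number) => address + ' (IPv' + family + ')'
);
lookupAsObservable('example.com').subscribe(
  x => console.log(x),
  e => console.error(e)
);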

File: agent.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package miner
import (
"sync"
"sync/atomic"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/log"
)
type CpuAgent struct {
mu sync.Mutex
workCh chan *Work
stop chan struct{}
quitCurrentOp chan struct{}
returnCh chan<- *Result
chain consensus.ChainReader
engine consensus.Engine
isMining int32 // isMining indicates whether the agent is currently mining
}
func NewCpuAgent(chain consensus.ChainReader, engine consensus.Engine) *CpuAgent {
miner := &CpuAgent{
chain: chain,
engine: engine,
stop: make(chan struct{}, 1),
workCh: make(chan *Work, 1),
}
return miner
}
func (self *CpuAgent) Work() chan<- *Work { return self.workCh }
func (self *CpuAgent) SetReturnCh(ch chan<- *Result) { self.returnCh = ch }
func (self *CpuAgent) Stop() {
if !atomic.CompareAndSwapInt32(&self.isMining, 1, 0) {
return // agent already stopped
}
self.stop <- struct{}{}
done:
// Empty work channel
for {
select {
case <-self.workCh:
default:
break done
}
}
}
func (self *CpuAgent) Start() {
if !atomic.CompareAndSwapInt32(&self.isMining, 0, 1) {
return // agent already started
}
go self.update()
}
func (self *CpuAgent) update() {
out:
for {
select {
case work := <-self.workCh:
self.mu.Lock()
if self.quitCurrentOp != nil {
close(self.quitCurrentOp)
}
self.quitCurrentOp = make(chan struct{})
go self.mine(work, self.quitCurrentOp)
self.mu.Unlock()
case <-self.stop:
self.mu.Lock()
if self.quitCurrentOp != nil {
close(self.quitCurrentOp)
self.quitCurrentOp = nil
}
self.mu.Unlock()
break out
}
}
}
func (self *CpuAgent) mine(work *Work, stop <-chan struct{}) {
if result, err := self.engine.Seal(self.chain, work.Block, stop); result != nil {
log.Info("Successfully sealed new block", "number", result.Number(), "hash", result.Hash())
self.returnCh <- &Result{work, result}
} else {
if err != nil {
log.Warn("Block sealing failed", "err", err)
}
self.returnCh <- nil
}
}

func (self *CpuAgent) GetHashRate() int64 {
	if pow, ok := self.engine.(consensus.PoW); ok {
return int64(pow.Hashrate())
}
return 0
}
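
A lifecycle sketch as Go comments; Work and Result construction is elided, and the driver below is a hypothetical caller inside the miner package:

// agent := NewCpuAgent(chain, engine)
// results := make(chan *Result, 1)
// agent.SetReturnCh(results)
// agent.Start()
// agent.Work() <- work // hand the agent a sealing task
// res := <-results     // nil on failure or abort
// agent.Stop()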

File: RouletteMaker.py

index = 20
bets = 25
names = ("Plain", "Cheval H", "Cheval V", "Trans", "Trans S", "Carre", "Colonne", "Simple")
for bet in range(bets):
col = 40
# --------------------------------------- money
print("""
when %d =>
if bets_index > %d then
fb_a_addr <= std_logic_vector(to_unsigned(COLS*21 + COLS*%d + %d, 14));
fb_a_dat_in <= x"24"; -- $
end if;""" % (index, bet, bet, col))
index += 1
col += 2 # extra space
for m in range(5, -1, -1):
print("""when %d =>
if bets_index > %d then
fb_a_addr <= std_logic_vector(to_unsigned(COLS*21 + COLS*%d + %d, 14));
fb_a_dat_in <= ascii_i(bets(%d).money, %d);
end if;""" % (index, bet, bet, col, bet, m))
index += 1
col += 1
if m == 5:
col += 1 # extra space
if m == 2:
print("""when %d =>
if bets_index > %d then
fb_a_addr <= std_logic_vector(to_unsigned(COLS*21 + COLS*%d + %d, 14));
fb_a_dat_in <= x"2e"; -- .
end if;""" % (index, bet, bet, col))
index += 1
col += 1
# --------------------------------------- name
col += 1
for n in range(8): # n = index of letter
print("""when %d =>
if bets_index > %d then
fb_a_addr <= std_logic_vector(to_unsigned(COLS*21 + COLS*%d + %d, 14));
case bets(%d).kind is""" % (index, bet, bet, col, bet))
for kind in range(1, 9):
if n < len(names[kind-1]) and names[kind-1][n] != ' ':
print(""" when %d => fb_a_dat_in <= x"%02x"; -- %c""" % (kind, ord(names[kind-1][n]), names[kind-1][n]))
print(""" when others => fb_a_dat_in <= x"20"; -- space
end case;
fb_a_dat_in <= x"2e"; -- .
end if;""")
index += 1
col += 1
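# Note (derived from the loops above): each bet emits 16 `when` arms in total --
# one for '$', six money digits, one decimal point, and eight name characters --
# so the state index advances from 20 in steps of 16 per bet.
# Hypothetical invocation (the output is VHDL source):
#   python RouletteMaker.py > roulette_bet_display.vhd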

File: index.js

import Colors from './Colors';
import Fonts from './Fonts';
import Metrics from './Metrics';
import Images from './Images';
import ApplicationStyles from './ApplicationStyles';
export { Colors, Fonts, Images, Metrics, ApplicationStyles };
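
A hedged consumer example for this barrel module; the import path and property names are assumptions about the surrounding project:

// import { Colors, Metrics } from '../Themes';
// const style = { margin: Metrics.baseMargin, backgroundColor: Colors.background };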

File: privacyCheckOnTypeParameterReferenceInConstructorParameter.js

//// [privacyCheckOnTypeParameterReferenceInConstructorParameter.ts]
export class A<T1>{
constructor(callback: (self: A<T1>) => void) {
var child = new B(this);
}
}
export class B<T2> {
constructor(parent: T2) { }
}

//// [privacyCheckOnTypeParameterReferenceInConstructorParameter.js]
define(["require", "exports"], function (require, exports) {
"use strict";
var A = (function () {
function A(callback) {
var child = new B(this);
}
return A;
}());
exports.A = A;
var B = (function () {
function B(parent) {
}
return B;
}());
exports.B = B;
});
//// [privacyCheckOnTypeParameterReferenceInConstructorParameter.d.ts]
export declare class A<T1> {
constructor(callback: (self: A<T1>) => void);
}
export declare class B<T2> {
constructor(parent: T2);
}

File: __init__.py

# Copyright: (c) 2012-2014, Michael DeHaan <[email protected]>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import json
import os
import random
import re
import stat
import tempfile
import time
from abc import ABCMeta, abstractmethod
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleActionSkip, AnsibleActionFail
from ansible.executor.module_common import modify_module
from ansible.module_utils.json_utils import _filter_non_json_lines
from ansible.module_utils.six import binary_type, string_types, text_type, iteritems, with_metaclass
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.parsing.utils.jsonify import jsonify
from ansible.release import __version__
from ansible.utils.unsafe_proxy import wrap_var
from ansible.vars.clean import remove_internal_keys
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionBase(with_metaclass(ABCMeta, object)):
'''
This class is the base class for all action plugins, and defines
code common to all actions. The base class handles the connection
by putting/getting files and executing commands based on the current
action in use.
'''
# A set of valid arguments
_VALID_ARGS = frozenset([])
def __init__(self, task, connection, play_context, loader, templar, shared_loader_obj):
self._task = task
self._connection = connection
self._play_context = play_context
self._loader = loader
self._templar = templar
self._shared_loader_obj = shared_loader_obj
self._cleanup_remote_tmp = False
self._supports_check_mode = True
self._supports_async = False
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
self._used_interpreter = None
@abstractmethod
def run(self, tmp=None, task_vars=None):
""" Action Plugins should implement this method to perform their
tasks. Everything else in this base class is a helper method for the
action plugin to do that.
:kwarg tmp: Deprecated parameter. This is no longer used. An action plugin that calls
another one and wants to use the same remote tmp for both should set
self._connection._shell.tmpdir rather than this parameter.
:kwarg task_vars: The variables (host vars, group vars, config vars,
etc) associated with this task.
:returns: dictionary of results from the module
Implementors of action modules may find the following variables especially useful:
* Module parameters. These are stored in self._task.args
"""
result = {}
if tmp is not None:
result['warning'] = ['ActionModule.run() no longer honors the tmp parameter. Action'
' plugins should set self._connection._shell.tmpdir to share'
' the tmpdir']
del tmp
if self._task.async_val and not self._supports_async:
raise AnsibleActionFail('async is not supported for this task.')
elif self._play_context.check_mode and not self._supports_check_mode:
raise AnsibleActionSkip('check mode is not supported for this task.')
elif self._task.async_val and self._play_context.check_mode:
raise AnsibleActionFail('check mode and async cannot be used on same task.')
# Error if invalid argument is passed
if self._VALID_ARGS:
task_opts = frozenset(self._task.args.keys())
bad_opts = task_opts.difference(self._VALID_ARGS)
if bad_opts:
raise AnsibleActionFail('Invalid options for %s: %s' % (self._task.action, ','.join(list(bad_opts))))
if self._connection._shell.tmpdir is None and self._early_needs_tmp_path():
self._make_tmp_path()
return result
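# A minimal sketch of a concrete action plugin built on this base class
# (hypothetical plugin; the pattern mirrors the simplest core action plugins):
#
#   class ActionModule(ActionBase):
#       def run(self, tmp=None, task_vars=None):
#           result = super(ActionModule, self).run(tmp, task_vars)
#           result.update(self._execute_module(task_vars=task_vars))
#           return result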
def _remote_file_exists(self, path):
cmd = self._connection._shell.exists(path)
result = self._low_level_execute_command(cmd=cmd, sudoable=True)
if result['rc'] == 0:
return True
return False
def _configure_module(self, module_name, module_args, task_vars=None):
'''
Handles the loading and templating of the module code through the
modify_module() function.
'''
if task_vars is None:
task_vars = dict()
# Search module path(s) for named module.
for mod_type in self._connection.module_implementation_preferences:
# Check to determine if PowerShell modules are supported, and apply
# some fixes (hacks) to module name + args.
if mod_type == '.ps1':
# win_stat, win_file, and win_copy are not just like their
# python counterparts but they are compatible enough for our
# internal usage
if module_name in ('stat', 'file', 'copy') and self._task.action != module_name:
module_name = 'win_%s' % module_name
# Remove extra quotes surrounding path parameters before sending to module.
if module_name in ('win_stat', 'win_file', 'win_copy', 'slurp') and module_args and hasattr(self._connection._shell, '_unquote'):
for key in ('src', 'dest', 'path'):
if key in module_args:
module_args[key] = self._connection._shell._unquote(module_args[key])
module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, mod_type)
if module_path:
break
else: # This is a for-else: http://bit.ly/1ElPkyg
# Use Windows version of ping module to check module paths when
# using a connection that supports .ps1 suffixes. We check specifically
# for win_ping here, otherwise the code would look for ping.ps1
if '.ps1' in self._connection.module_implementation_preferences:
ping_module = 'win_ping'
else:
ping_module = 'ping'
module_path2 = self._shared_loader_obj.module_loader.find_plugin(ping_module, self._connection.module_implementation_preferences)
if module_path2 is not None:
raise AnsibleError("The module %s was not found in configured module paths" % (module_name))
else:
raise AnsibleError("The module %s was not found in configured module paths. "
"Additionally, core modules are missing. If this is a checkout, "
"run 'git pull --rebase' to correct this problem." % (module_name))
# insert shared code and arguments into the module
final_environment = dict()
self._compute_environment_string(final_environment)
(module_data, module_style, module_shebang) = modify_module(module_name, module_path, module_args, self._templar,
task_vars=task_vars,
module_compression=self._play_context.module_compression,
async_timeout=self._task.async_val,
become=self._play_context.become,
become_method=self._play_context.become_method,
become_user=self._play_context.become_user,
become_password=self._play_context.become_pass,
become_flags=self._play_context.become_flags,
environment=final_environment)
return (module_style, module_shebang, module_data, module_path)
def _compute_environment_string(self, raw_environment_out=None):
'''
Builds the environment string to be used when executing the remote task.
'''
final_environment = dict()
if self._task.environment is not None:
environments = self._task.environment
if not isinstance(environments, list):
environments = [environments]
# The order of environments matters to make sure we merge
# in the parent's values first so those in the block then
# task 'win' in precedence
for environment in environments:
if environment is None or len(environment) == 0:
continue
temp_environment = self._templar.template(environment)
if not isinstance(temp_environment, dict):
raise AnsibleError("environment must be a dictionary, received %s (%s)" % (temp_environment, type(temp_environment)))
# very deliberately using update here instead of combine_vars, as
# these environment settings should not need to merge sub-dicts
final_environment.update(temp_environment)
if len(final_environment) > 0:
final_environment = self._templar.template(final_environment)
if isinstance(raw_environment_out, dict):
raw_environment_out.clear()
raw_environment_out.update(final_environment)
return self._connection._shell.env_prefix(**final_environment)
def _early_needs_tmp_path(self):
'''
Determines if a tmp path should be created before the action is executed.
'''
return getattr(self, 'TRANSFERS_FILES', False)
def _is_pipelining_enabled(self, module_style, wrap_async=False):
'''
Determines if we are required and can do pipelining
'''
# any of these require a true
for condition in [
self._connection.has_pipelining,
self._play_context.pipelining or self._connection.always_pipeline_modules, # pipelining enabled for play or connection requires it (eg winrm)
module_style == "new", # old style modules do not support pipelining
not C.DEFAULT_KEEP_REMOTE_FILES, # user wants remote files
not wrap_async or self._connection.always_pipeline_modules, # async does not normally support pipelining unless it does (eg winrm)
self._play_context.become_method != 'su', # su does not work with pipelining,
# FIXME: we might need to make become_method exclusion a configurable list
]:
if not condition:
return False
return True
def _get_admin_users(self):
'''
Returns a list of admin users that are configured for the current shell
plugin
'''
try:
admin_users = self._connection._shell.get_option('admin_users')
except AnsibleError:
# fallback for old custom plugins w/o get_option
admin_users = ['root']
return admin_users
def _is_become_unprivileged(self):
'''
The user is not the same as the connection user and is not part of the
shell configured admin users
'''
# if we don't use become then we know we aren't switching to a
# different unprivileged user
if not self._play_context.become:
return False
# if we use become and the user is not an admin (or same user) then
# we need to return become_unprivileged as True
admin_users = self._get_admin_users()
try:
remote_user = self._connection.get_option('remote_user')
except AnsibleError:
remote_user = self._play_context.remote_user
return bool(self._play_context.become_user not in admin_users + [remote_user])
def _make_tmp_path(self, remote_user=None):
'''
Create and return a temporary path on a remote box.
'''
become_unprivileged = self._is_become_unprivileged()
try:
remote_tmp = self._connection._shell.get_option('remote_tmp')
except AnsibleError:
remote_tmp = '~/.ansible/tmp'
# deal with tmpdir creation
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
# Network connection plugins (network_cli, netconf, etc.) execute on the controller, rather than the remote host.
# As such, we want to avoid using remote_user for paths as remote_user may not line up with the local user
# This is a hack and should be solved by more intelligent handling of remote_tmp in 2.7
if getattr(self._connection, '_remote_is_local', False):
tmpdir = C.DEFAULT_LOCAL_TMP
else:
tmpdir = self._remote_expand_user(remote_tmp, sudoable=False)
cmd = self._connection._shell.mkdtemp(basefile=basefile, system=become_unprivileged, tmpdir=tmpdir)
result = self._low_level_execute_command(cmd, sudoable=False)
# error handling on this seems a little aggressive?
if result['rc'] != 0:
if result['rc'] == 5:
output = 'Authentication failure.'
elif result['rc'] == 255 and self._connection.transport in ('ssh',):
if self._play_context.verbosity > 3:
output = u'SSH encountered an unknown error. The output was:\n%s%s' % (result['stdout'], result['stderr'])
else:
output = (u'SSH encountered an unknown error during the connection. '
'We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue')
elif u'No space left on device' in result['stderr']:
output = result['stderr']
else:
output = ('Authentication or permission failure. '
'In some cases, you may have been able to authenticate and did not have permissions on the target directory. '
'Consider changing the remote tmp path in ansible.cfg to a path rooted in "/tmp". '
'Failed command was: %s, exited with result %d' % (cmd, result['rc']))
if 'stdout' in result and result['stdout'] != u'':
output = output + u", stdout output: %s" % result['stdout']
if self._play_context.verbosity > 3 and 'stderr' in result and result['stderr'] != u'':
output += u", stderr output: %s" % result['stderr']
raise AnsibleConnectionFailure(output)
else:
self._cleanup_remote_tmp = True
try:
stdout_parts = result['stdout'].strip().split('%s=' % basefile, 1)
rc = self._connection._shell.join_path(stdout_parts[-1], u'').splitlines()[-1]
except IndexError:
# stdout was empty or just space, set to / to trigger error in next if
rc = '/'
# Catch failure conditions, files should never be
# written to locations in /.
if rc == '/':
raise AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd))
self._connection._shell.tmpdir = rc
return rc
def _should_remove_tmp_path(self, tmp_path):
'''Determine if temporary path should be deleted or kept by user request/config'''
return tmp_path and self._cleanup_remote_tmp and not C.DEFAULT_KEEP_REMOTE_FILES and "-tmp-" in tmp_path
def _remove_tmp_path(self, tmp_path):
'''Remove a temporary path we created. '''
if tmp_path is None and self._connection._shell.tmpdir:
tmp_path = self._connection._shell.tmpdir
if self._should_remove_tmp_path(tmp_path):
cmd = self._connection._shell.remove(tmp_path, recurse=True)
# If we have gotten here we have a working ssh configuration.
# If ssh breaks we could leave tmp directories out on the remote system.
tmp_rm_res = self._low_level_execute_command(cmd, sudoable=False)
if tmp_rm_res.get('rc', 0) != 0:
display.warning('Error deleting remote temporary files (rc: %s, stderr: %s)'
% (tmp_rm_res.get('rc'), tmp_rm_res.get('stderr', 'No error string available.')))
else:
self._connection._shell.tmpdir = None
def _transfer_file(self, local_path, remote_path):
self._connection.put_file(local_path, remote_path)
return remote_path
def _transfer_data(self, remote_path, data):
'''
Copies the module data out to the temporary module path.
'''
if isinstance(data, dict):
data = jsonify(data)
afd, afile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
afo = os.fdopen(afd, 'wb')
try:
data = to_bytes(data, errors='surrogate_or_strict')
afo.write(data)
except Exception as e:
raise AnsibleError("failure writing module data to temporary file for transfer: %s" % to_native(e))
afo.flush()
afo.close()
try:
self._transfer_file(afile, remote_path)
finally:
os.unlink(afile)
return remote_path
def _fixup_perms2(self, remote_paths, remote_user=None, execute=True):
"""
We need the files we upload to be readable (and sometimes executable)
by the user being sudo'd to but we want to limit other people's access
(because the files could contain passwords or other private
information). We achieve this in one of these ways:
* If no sudo is performed or the remote_user is sudo'ing to
themselves, we don't have to change permissions.
* If the remote_user sudo's to a privileged user (for instance, root),
we don't have to change permissions
* If the remote_user sudo's to an unprivileged user then we attempt to
grant the unprivileged user access via file system acls.
* If granting file system acls fails we try to change the owner of the
file with chown which only works in case the remote_user is
privileged or the remote systems allows chown calls by unprivileged
users (e.g. HP-UX)
* If the chown fails we can set the file to be world readable so that
the second unprivileged user can read the file.
Since this could allow other users to get access to private
information we only do this if ansible is configured with
"allow_world_readable_tmpfiles" in the ansible.cfg
"""
if remote_user is None:
remote_user = self._play_context.remote_user
if self._connection._shell.SHELL_FAMILY == 'powershell':
# This won't work on Powershell as-is, so we'll just completely skip until
# we have a need for it, at which point we'll have to do something different.
return remote_paths
if self._is_become_unprivileged():
# Unprivileged user that's different than the ssh user. Let's get
# to work!
# Try to use file system acls to make the files readable for sudo'd
# user
if execute:
chmod_mode = 'rx'
setfacl_mode = 'r-x'
else:
chmod_mode = 'rX'
# NOTE: this form fails silently on freebsd. We currently
# never call _fixup_perms2() with execute=False but if we
# start to we'll have to fix this.
setfacl_mode = 'r-X'
res = self._remote_set_user_facl(remote_paths, self._play_context.become_user, setfacl_mode)
if res['rc'] != 0:
# File system acls failed; let's try to use chown next
# Set executable bit first as on some systems an
# unprivileged user can use chown
if execute:
res = self._remote_chmod(remote_paths, 'u+x')
if res['rc'] != 0:
raise AnsibleError('Failed to set file mode on remote temporary files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))
res = self._remote_chown(remote_paths, self._play_context.become_user)
if res['rc'] != 0 and remote_user in self._get_admin_users():
# chown failed even if remote_user is administrator/root
raise AnsibleError('Failed to change ownership of the temporary files Ansible needs to create despite connecting as a privileged user. '
'Unprivileged become user would be unable to read the file.')
elif res['rc'] != 0:
if C.ALLOW_WORLD_READABLE_TMPFILES:
# chown and fs acls failed -- do things this insecure
# way only if the user opted in in the config file
display.warning('Using world-readable permissions for temporary files Ansible needs to create when becoming an unprivileged user. '
'This may be insecure. For information on securing this, see '
'https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user')
res = self._remote_chmod(remote_paths, 'a+%s' % chmod_mode)
if res['rc'] != 0:
raise AnsibleError('Failed to set file mode on remote files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))
else:
raise AnsibleError('Failed to set permissions on the temporary files Ansible needs to create when becoming an unprivileged user '
'(rc: %s, err: %s). For information on working around this, see '
'https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user'
% (res['rc'], to_native(res['stderr'])))
elif execute:
# Can't depend on the file being transferred with execute permissions.
# Only need user perms because no become was used here
res = self._remote_chmod(remote_paths, 'u+x')
if res['rc'] != 0:
raise AnsibleError('Failed to set execute bit on remote files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))
return remote_paths
def _remote_chmod(self, paths, mode, sudoable=False):
'''
Issue a remote chmod command
'''
cmd = self._connection._shell.chmod(paths, mode)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _remote_chown(self, paths, user, sudoable=False):
'''
Issue a remote chown command
'''
cmd = self._connection._shell.chown(paths, user)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _remote_set_user_facl(self, paths, user, mode, sudoable=False):
'''
Issue a remote call to setfacl
'''
cmd = self._connection._shell.set_user_facl(paths, user, mode)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _execute_remote_stat(self, path, all_vars, follow, tmp=None, checksum=True):
'''
Get information from remote file.
'''
if tmp is not None:
display.warning('_execute_remote_stat no longer honors the tmp parameter. Action'
' plugins should set self._connection._shell.tmpdir to share'
' the tmpdir')
del tmp # No longer used
module_args = dict(
path=path,
follow=follow,
get_checksum=checksum,
checksum_algo='sha1',
)
mystat = self._execute_module(module_name='stat', module_args=module_args, task_vars=all_vars,
wrap_async=False)
if mystat.get('failed'):
msg = mystat.get('module_stderr')
if not msg:
msg = mystat.get('module_stdout')
if not msg:
msg = mystat.get('msg')
raise AnsibleError('Failed to get information on remote file (%s): %s' % (path, msg))
if not mystat['stat']['exists']:
# empty might be matched, 1 should never match, also backwards compatible
mystat['stat']['checksum'] = '1'
# happens sometimes when it is a dir and not on bsd
if 'checksum' not in mystat['stat']:
mystat['stat']['checksum'] = ''
elif not isinstance(mystat['stat']['checksum'], string_types):
raise AnsibleError("Invalid checksum returned by stat: expected a string type but got %s" % type(mystat['stat']['checksum']))
return mystat['stat']
def _remote_checksum(self, path, all_vars, follow=False):
'''
Produces a remote checksum given a path,
Returns a number 0-5 for specific errors instead of a checksum; the value always differs from a real checksum
0 = unknown error
1 = file does not exist, this might not be an error
2 = permissions issue
3 = its a directory, not a file
4 = stat module failed, likely due to not finding python
5 = appropriate json module not found
'''
x = "0" # unknown error has occurred
try:
remote_stat = self._execute_remote_stat(path, all_vars, follow=follow)
if remote_stat['exists'] and remote_stat['isdir']:
x = "3" # its a directory not a file
else:
x = remote_stat['checksum'] # if 1, file is missing
except AnsibleError as e:
errormsg = to_text(e)
if errormsg.endswith(u'Permission denied'):
x = "2" # cannot read file
elif errormsg.endswith(u'MODULE FAILURE'):
x = "4" # python not found or module uncaught exception
elif 'json' in errormsg:
x = "5" # json module needed
finally:
return x # pylint: disable=lost-exception
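# Illustrative caller (hypothetical): the sentinel strings above are compared
# against a real checksum, e.g.
#   remote = self._remote_checksum(dest, all_vars=task_vars)
#   if remote == '1':
#       ... # file missing; may be acceptable for some actions
#   elif remote in ('2', '3', '4', '5'):
#       ... # permission/dir/stat/json failure handling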
def _remote_expand_user(self, path, sudoable=True, pathsep=None):
''' takes a remote path and performs tilde/$HOME expansion on the remote host '''
# We only expand ~/path and ~username/path
if not path.startswith('~'):
return path
# Per Jborean, we don't have to worry about Windows as we don't have a notion of user's home
# dir there.
split_path = path.split(os.path.sep, 1)
expand_path = split_path[0]
if expand_path == '~':
# Network connection plugins (network_cli, netconf, etc.) execute on the controller, rather than the remote host.
# As such, we want to avoid using remote_user for paths as remote_user may not line up with the local user
# This is a hack and should be solved by more intelligent handling of remote_tmp in 2.7
if getattr(self._connection, '_remote_is_local', False):
pass
elif sudoable and self._play_context.become and self._play_context.become_user:
expand_path = '~%s' % self._play_context.become_user
else:
# use remote user instead, if none set default to current user
expand_path = '~%s' % (self._play_context.remote_user or self._connection.default_user or '')
# use shell to construct appropriate command and execute
cmd = self._connection._shell.expand_user(expand_path)
data = self._low_level_execute_command(cmd, sudoable=False)
try:
initial_fragment = data['stdout'].strip().splitlines()[-1]
except IndexError:
initial_fragment = None
if not initial_fragment:
# Something went wrong trying to expand the path remotely. Try using pwd, if not, return
# the original string
cmd = self._connection._shell.pwd()
pwd = self._low_level_execute_command(cmd, sudoable=False).get('stdout', '').strip()
if pwd:
expanded = pwd
else:
expanded = path
elif len(split_path) > 1:
expanded = self._connection._shell.join_path(initial_fragment, *split_path[1:])
else:
expanded = initial_fragment
return expanded
def _strip_success_message(self, data):
'''
Removes the BECOME-SUCCESS message from the data.
'''
if data.strip().startswith('BECOME-SUCCESS-'):
data = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', data)
return data
def _update_module_args(self, module_name, module_args, task_vars):
# set check mode in the module arguments, if required
if self._play_context.check_mode:
if not self._supports_check_mode:
raise AnsibleError("check mode is not supported for this operation")
module_args['_ansible_check_mode'] = True
else:
module_args['_ansible_check_mode'] = False
# set no log in the module arguments, if required
module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG
# set debug in the module arguments, if required
module_args['_ansible_debug'] = C.DEFAULT_DEBUG
# let module know we are in diff mode
module_args['_ansible_diff'] = self._play_context.diff
# let module know our verbosity
module_args['_ansible_verbosity'] = display.verbosity
# give the module information about the ansible version
module_args['_ansible_version'] = __version__
# give the module information about its name
module_args['_ansible_module_name'] = module_name
# set the syslog facility to be used in the module
module_args['_ansible_syslog_facility'] = task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY)
# let module know about filesystems that selinux treats specially
module_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS
# give the module the socket for persistent connections
module_args['_ansible_socket'] = getattr(self._connection, 'socket_path')
if not module_args['_ansible_socket']:
module_args['_ansible_socket'] = task_vars.get('ansible_socket')
# make sure all commands use the designated shell executable
module_args['_ansible_shell_executable'] = self._play_context.executable
# make sure modules are aware if they need to keep the remote files
module_args['_ansible_keep_remote_files'] = C.DEFAULT_KEEP_REMOTE_FILES
# make sure all commands use the designated temporary directory if created
if self._is_become_unprivileged(): # force fallback on remote_tmp as user cannot normally write to dir
module_args['_ansible_tmpdir'] = None
else:
module_args['_ansible_tmpdir'] = self._connection._shell.tmpdir
# make sure the remote_tmp value is sent through in case modules needs to create their own
try:
module_args['_ansible_remote_tmp'] = self._connection._shell.get_option('remote_tmp')
except KeyError:
# here for 3rd party shell plugin compatibility in case they do not define the remote_tmp option
module_args['_ansible_remote_tmp'] = '~/.ansible/tmp'
def _update_connection_options(self, options, variables=None):
''' ensures connections have the appropriate information '''
update = {}
if getattr(self._connection, 'glob_option_vars', False):
# if the connection allows for it, pass any variables matching it.
if variables is not None:
for varname in variables:
# plain strings have no .match(); prefix matching is the intent of the naming scheme
if varname.startswith('ansible_%s_' % self._connection._load_name):
update[varname] = variables[varname]
# always override existing with options
update.update(options)
self._connection.set_options(update)
def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=None, wrap_async=False):
'''
Transfer and run a module along with its arguments.
'''
if tmp is not None:
display.warning('_execute_module no longer honors the tmp parameter. Action plugins'
' should set self._connection._shell.tmpdir to share the tmpdir')
del tmp # No longer used
if delete_remote_tmp is not None:
display.warning('_execute_module no longer honors the delete_remote_tmp parameter.'
' Action plugins should check self._connection._shell.tmpdir to'
' see if a tmpdir existed before they were called to determine'
' if they are responsible for removing it.')
del delete_remote_tmp # No longer used
tmpdir = self._connection._shell.tmpdir
# We set the module_style to new here so the remote_tmp is created
# before the module args are built if remote_tmp is needed (async).
# If the module_style turns out to not be new and we didn't create the
# remote tmp here, it will still be created. This must be done before
# calling self._update_module_args() so the module wrapper has the
# correct remote_tmp value set
if not self._is_pipelining_enabled("new", wrap_async) and tmpdir is None:
self._make_tmp_path()
tmpdir = self._connection._shell.tmpdir
if task_vars is None:
task_vars = dict()
# if a module name was not specified for this execution, use the action from the task
if module_name is None:
module_name = self._task.action
if module_args is None:
module_args = self._task.args
self._update_module_args(module_name, module_args, task_vars)
# FUTURE: refactor this along with module build process to better encapsulate "smart wrapper" functionality
(module_style, shebang, module_data, module_path) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
display.vvv("Using module file %s" % module_path)
if not shebang and module_style != 'binary':
raise AnsibleError("module (%s) is missing interpreter line" % module_name)
self._used_interpreter = shebang
remote_module_path = None
if not self._is_pipelining_enabled(module_style, wrap_async):
# we might need remote tmp dir
if tmpdir is None:
self._make_tmp_path()
tmpdir = self._connection._shell.tmpdir
remote_module_filename = self._connection._shell.get_remote_filename(module_path)
remote_module_path = self._connection._shell.join_path(tmpdir, 'AnsiballZ_%s' % remote_module_filename)
args_file_path = None
if module_style in ('old', 'non_native_want_json', 'binary'):
# we'll also need a tmp file to hold our module arguments
args_file_path = self._connection._shell.join_path(tmpdir, 'args')
if remote_module_path or module_style != 'new':
display.debug("transferring module to remote %s" % remote_module_path)
if module_style == 'binary':
self._transfer_file(module_path, remote_module_path)
else:
self._transfer_data(remote_module_path, module_data)
if module_style == 'old':
# we need to dump the module args to a k=v string in a file on
# the remote system, which can be read and parsed by the module
args_data = ""
for k, v in iteritems(module_args):
args_data += '%s=%s ' % (k, shlex_quote(text_type(v)))
self._transfer_data(args_file_path, args_data)
elif module_style in ('non_native_want_json', 'binary'):
self._transfer_data(args_file_path, json.dumps(module_args))
display.debug("done transferring module to remote")
environment_string = self._compute_environment_string()
remote_files = []
if tmpdir and remote_module_path:
remote_files = [tmpdir, remote_module_path]
if args_file_path:
remote_files.append(args_file_path)
sudoable = True
in_data = None
cmd = ""
if wrap_async and not self._connection.always_pipeline_modules:
# configure, upload, and chmod the async_wrapper module
(async_module_style, shebang, async_module_data, async_module_path) = self._configure_module(module_name='async_wrapper', module_args=dict(),
task_vars=task_vars)
async_module_remote_filename = self._connection._shell.get_remote_filename(async_module_path)
remote_async_module_path = self._connection._shell.join_path(tmpdir, async_module_remote_filename)
self._transfer_data(remote_async_module_path, async_module_data)
remote_files.append(remote_async_module_path)
async_limit = self._task.async_val
async_jid = str(random.randint(0, 999999999999))
# call the interpreter for async_wrapper directly
# this permits use of a script for an interpreter on non-Linux platforms
# TODO: re-implement async_wrapper as a regular module to avoid this special case
interpreter = shebang.replace('#!', '').strip()
async_cmd = [interpreter, remote_async_module_path, async_jid, async_limit, remote_module_path]
if environment_string:
async_cmd.insert(0, environment_string)
if args_file_path:
async_cmd.append(args_file_path)
else:
# maintain a fixed number of positional parameters for async_wrapper
async_cmd.append('_')
if not self._should_remove_tmp_path(tmpdir):
async_cmd.append("-preserve_tmp")
cmd = " ".join(to_text(x) for x in async_cmd)
else:
if self._is_pipelining_enabled(module_style):
in_data = module_data
else:
cmd = remote_module_path
cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path).strip()
# Fix permissions of the tmpdir path and tmpdir files. This should be called after all
# files have been transferred.
if remote_files:
# remove none/empty
remote_files = [x for x in remote_files if x]
self._fixup_perms2(remote_files, self._play_context.remote_user)
# actually execute
res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)
# parse the main result
data = self._parse_returned_data(res)
# NOTE: INTERNAL KEYS ONLY ACCESSIBLE HERE
# get internal info before cleaning
if data.pop("_ansible_suppress_tmpdir_delete", False):
self._cleanup_remote_tmp = False
# remove internal keys
remove_internal_keys(data)
if wrap_async:
# async_wrapper will clean up its tmpdir on its own so we want the controller side to
# forget about it now
self._connection._shell.tmpdir = None
# FIXME: for backwards compat, figure out if still makes sense
data['changed'] = True
# pre-split stdout/stderr into lines if needed
if 'stdout' in data and 'stdout_lines' not in data:
# if the value is 'False', a default won't catch it.
txt = data.get('stdout', None) or u''
data['stdout_lines'] = txt.splitlines()
if 'stderr' in data and 'stderr_lines' not in data:
# if the value is 'False', a default won't catch it.
txt = data.get('stderr', None) or u''
data['stderr_lines'] = txt.splitlines()
display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
return data
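    # Illustrative sketch (not part of the upstream file): a custom action
    # plugin built on this base class would typically drive _execute_module
    # along these lines; the module name and arguments are placeholders.
    #
    #   def run(self, tmp=None, task_vars=None):
    #       result = super(ActionModule, self).run(tmp, task_vars)
    #       result.update(self._execute_module(module_name='ping',
    #                                          module_args=dict(data='pong'),
    #                                          task_vars=task_vars))
    #       return result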
def _parse_returned_data(self, res):
try:
filtered_output, warnings = _filter_non_json_lines(res.get('stdout', u''))
for w in warnings:
display.warning(w)
data = json.loads(filtered_output)
if 'ansible_facts' in data and isinstance(data['ansible_facts'], dict):
data['ansible_facts'] = wrap_var(data['ansible_facts'])
data['_ansible_parsed'] = True
except ValueError:
# not valid json, lets try to capture error
data = dict(failed=True, _ansible_parsed=False)
data['module_stdout'] = res.get('stdout', u'')
if 'stderr' in res:
data['module_stderr'] = res['stderr']
if res['stderr'].startswith(u'Traceback'):
data['exception'] = res['stderr']
# try to figure out if we are missing interpreter
if self._used_interpreter is not None and '%s: No such file or directory' % self._used_interpreter.lstrip('!#') in data['module_stderr']:
data['msg'] = "The module failed to execute correctly, you probably need to set the interpreter."
else:
data['msg'] = "MODULE FAILURE"
data['msg'] += '\nSee stdout/stderr for the exact error'
if 'rc' in res:
data['rc'] = res['rc']
return data
def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=None, encoding_errors='surrogate_then_replace', chdir=None):
'''
This is the function which executes the low level shell command, which
may be commands to create/remove directories for temporary files, or to
run the module code or python directly when pipelining.
:kwarg encoding_errors: If the value returned by the command isn't
utf-8 then we have to figure out how to transform it to unicode.
If the value is just going to be displayed to the user (or
            discarded) then the default of 'surrogate_then_replace' is fine. If the data is
used as a key or is going to be written back out to a file
verbatim, then this won't work. May have to use some sort of
replacement strategy (python3 could use surrogateescape)
:kwarg chdir: cd into this directory before executing the command.
'''
display.debug("_low_level_execute_command(): starting")
# if not cmd:
# # this can happen with powershell modules when there is no analog to a Windows command (like chmod)
# display.debug("_low_level_execute_command(): no command, exiting")
# return dict(stdout='', stderr='', rc=254)
if chdir:
display.debug("_low_level_execute_command(): changing cwd to %s for this command" % chdir)
cmd = self._connection._shell.append_command('cd %s' % chdir, cmd)
allow_same_user = C.BECOME_ALLOW_SAME_USER
same_user = self._play_context.become_user == self._play_context.remote_user
if sudoable and self._play_context.become and (allow_same_user or not same_user):
display.debug("_low_level_execute_command(): using become for this command")
if self._connection.transport != 'network_cli' and self._play_context.become_method != 'enable':
cmd = self._play_context.make_become_cmd(cmd, executable=executable)
if self._connection.allow_executable:
if executable is None:
executable = self._play_context.executable
# mitigation for SSH race which can drop stdout (https://github.com/ansible/ansible/issues/13876)
# only applied for the default executable to avoid interfering with the raw action
cmd = self._connection._shell.append_command(cmd, 'sleep 0')
if executable:
cmd = executable + ' -c ' + shlex_quote(cmd)
display.debug("_low_level_execute_command(): executing: %s" % (cmd,))
# Change directory to basedir of task for command execution when connection is local
if self._connection.transport == 'local':
cwd = os.getcwd()
os.chdir(self._loader.get_basedir())
try:
rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
finally:
if self._connection.transport == 'local':
os.chdir(cwd)
# stdout and stderr may be either a file-like or a bytes object.
# Convert either one to a text type
if isinstance(stdout, binary_type):
out = to_text(stdout, errors=encoding_errors)
elif not isinstance(stdout, text_type):
out = to_text(b''.join(stdout.readlines()), errors=encoding_errors)
else:
out = stdout
if isinstance(stderr, binary_type):
err = to_text(stderr, errors=encoding_errors)
elif not isinstance(stderr, text_type):
err = to_text(b''.join(stderr.readlines()), errors=encoding_errors)
else:
err = stderr
if rc is None:
rc = 0
# be sure to remove the BECOME-SUCCESS message now
out = self._strip_success_message(out)
display.debug(u"_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, out, err))
return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err, stderr_lines=err.splitlines())
def _get_diff_data(self, destination, source, task_vars, source_file=True):
diff = {}
display.debug("Going to peek to see if file has changed permissions")
peek_result = self._execute_module(module_name='file', module_args=dict(path=destination, _diff_peek=True), task_vars=task_vars, persist_files=True)
if not peek_result.get('failed', False) or peek_result.get('rc', 0) == 0:
if peek_result.get('state') == 'absent':
diff['before'] = ''
elif peek_result.get('appears_binary'):
diff['dst_binary'] = 1
elif peek_result.get('size') and C.MAX_FILE_SIZE_FOR_DIFF > 0 and peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF:
diff['dst_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
else:
display.debug("Slurping the file %s" % source)
dest_result = self._execute_module(module_name='slurp', module_args=dict(path=destination), task_vars=task_vars, persist_files=True)
if 'content' in dest_result:
dest_contents = dest_result['content']
if dest_result['encoding'] == 'base64':
dest_contents = base64.b64decode(dest_contents)
else:
raise AnsibleError("unknown encoding in content option, failed: %s" % dest_result)
diff['before_header'] = destination
diff['before'] = dest_contents
if source_file:
st = os.stat(source)
if C.MAX_FILE_SIZE_FOR_DIFF > 0 and st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF:
diff['src_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
else:
display.debug("Reading local copy of the file %s" % source)
try:
with open(source, 'rb') as src:
src_contents = src.read()
except Exception as e:
raise AnsibleError("Unexpected error while reading source (%s) for diff: %s " % (source, str(e)))
if b"\x00" in src_contents:
diff['src_binary'] = 1
else:
diff['after_header'] = source
diff['after'] = src_contents
else:
display.debug("source of file passed in")
diff['after_header'] = 'dynamically generated'
diff['after'] = source
if self._play_context.no_log:
if 'before' in diff:
diff["before"] = ""
if 'after' in diff:
diff["after"] = " [[ Diff output has been hidden because 'no_log: true' was specified for this result ]]\n"
return diff
def _find_needle(self, dirname, needle):
'''
        find a needle in a haystack of paths, optionally using 'dirname' as a subdir.
This will build the ordered list of paths to search and pass them to dwim
to get back the first existing file found.
'''
# dwim already deals with playbook basedirs
path_stack = self._task.get_search_path()
# if missing it will return a file not found exception
return self._loader.path_dwim_relative_stack(path_stack, dirname, needle)<|fim▁end|> | def _configure_module(self, module_name, module_args, task_vars=None):
''' |
<|file_name|>AboutPage.js<|end_file_name|><|fim▁begin|>import React from 'react';
import {Link} from 'react-router';
import '../../styles/about-page.css';
<|fim▁hole|> return (
<div>
<h2 className="alt-header">About</h2>
<p>
This example app is part of the <a href="https://github.com/coryhouse/react-slingshot">React-Slingshot
starter kit</a>.
</p>
<p>
<Link to="/badlink">Click this bad link</Link> to see the 404 page.
</p>
</div>
);
};
export default AboutPage;<|fim▁end|> | // Since this component is simple and static, there's no parent container for it.
const AboutPage = () => { |
<|file_name|>AllMembershipGroup.js<|end_file_name|><|fim▁begin|>module.exports = {
attributes: {
group: {
model: 'Group'
},
user: {
model: 'User'
},
synchronized: 'boolean',<|fim▁hole|> child_group: {
model: 'Group'
},
level: 'integer'
},
migrate: 'safe',
tableName: 'all_membership_group',
autoUpdatedAt: false,
autoCreatedAt: false
};<|fim▁end|> | active: 'boolean', |
<|file_name|>merchant.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time, sys, socket, os
import threading
import urllib2
import json
import Queue
import sqlite3
import electrum_doge as electrum
electrum.set_verbosity(False)
import ConfigParser
config = ConfigParser.ConfigParser()
config.read("merchant.conf")
my_password = config.get('main','password')
my_host = config.get('main','host')<|fim▁hole|>received_url = config.get('callback','received')
expired_url = config.get('callback','expired')
cb_password = config.get('callback','password')
wallet_path = config.get('electrum','wallet_path')
xpub = config.get('electrum','xpub')
pending_requests = {}
num = 0
def check_create_table(conn):
global num
c = conn.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='electrum_payments';")
data = c.fetchall()
if not data:
c.execute("""CREATE TABLE electrum_payments (address VARCHAR(40), amount FLOAT, confirmations INT(8), received_at TIMESTAMP, expires_at TIMESTAMP, paid INT(1), processed INT(1));""")
conn.commit()
c.execute("SELECT Count(address) FROM 'electrum_payments'")
num = c.fetchone()[0]
print "num rows", num
def row_to_dict(x):
return {
'id':x[0],
'address':x[1],
'amount':x[2],
'confirmations':x[3],
'received_at':x[4],
'expires_at':x[5],
'paid':x[6],
'processed':x[7]
}
# this process detects when addresses have received payments
def on_wallet_update():
for addr, v in pending_requests.items():
h = wallet.history.get(addr, [])
requested_amount = v.get('requested')
requested_confs = v.get('confirmations')
value = 0
for tx_hash, tx_height in h:
tx = wallet.transactions.get(tx_hash)
if not tx: continue
if wallet.verifier.get_confirmations(tx_hash) < requested_confs: continue
for o in tx.outputs:
o_address, o_value = o
if o_address == addr:
value += o_value
s = (value)/1.e8
print "balance for %s:"%addr, s, requested_amount
if s>= requested_amount:
print "payment accepted", addr
out_queue.put( ('payment', addr))
stopping = False
def do_stop(password):
global stopping
if password != my_password:
return "wrong password"
stopping = True
return "ok"
def process_request(amount, confirmations, expires_in, password):
global num
if password != my_password:
return "wrong password"
try:
amount = float(amount)
confirmations = int(confirmations)
expires_in = float(expires_in)
except Exception:
return "incorrect parameters"
account = wallet.default_account()
addr = account.get_address(0, num)
num += 1
out_queue.put( ('request', (addr, amount, confirmations, expires_in) ))
return addr
def do_dump(password):
if password != my_password:
return "wrong password"
conn = sqlite3.connect(database);
cur = conn.cursor()
# read pending requests from table
cur.execute("SELECT oid, * FROM electrum_payments;")
data = cur.fetchall()
return map(row_to_dict, data)
def getrequest(oid, password):
oid = int(oid)
conn = sqlite3.connect(database);
cur = conn.cursor()
# read pending requests from table
cur.execute("SELECT oid, * FROM electrum_payments WHERE oid=%d;"%(oid))
data = cur.fetchone()
return row_to_dict(data)
def send_command(cmd, params):
import jsonrpclib
server = jsonrpclib.Server('http://%s:%d'%(my_host, my_port))
try:
f = getattr(server, cmd)
except socket.error:
print "Server not running"
return 1
try:
out = f(*params)
except socket.error:
print "Server not running"
return 1
print json.dumps(out, indent=4)
return 0
def db_thread():
conn = sqlite3.connect(database);
# create table if needed
check_create_table(conn)
while not stopping:
cur = conn.cursor()
# read pending requests from table
cur.execute("SELECT address, amount, confirmations FROM electrum_payments WHERE paid IS NULL;")
data = cur.fetchall()
# add pending requests to the wallet
for item in data:
addr, amount, confirmations = item
if addr in pending_requests:
continue
else:
with wallet.lock:
print "subscribing to %s"%addr
pending_requests[addr] = {'requested':float(amount), 'confirmations':int(confirmations)}
wallet.synchronizer.subscribe_to_addresses([addr])
wallet.up_to_date = False
try:
cmd, params = out_queue.get(True, 10)
except Queue.Empty:
cmd = ''
if cmd == 'payment':
addr = params
# set paid=1 for received payments
print "received payment from", addr
cur.execute("update electrum_payments set paid=1 where address='%s'"%addr)
elif cmd == 'request':
# add a new request to the table.
addr, amount, confs, minutes = params
sql = "INSERT INTO electrum_payments (address, amount, confirmations, received_at, expires_at, paid, processed)"\
+ " VALUES ('%s', %f, %d, datetime('now'), datetime('now', '+%d Minutes'), NULL, NULL);"%(addr, amount, confs, minutes)
print sql
cur.execute(sql)
# set paid=0 for expired requests
cur.execute("""UPDATE electrum_payments set paid=0 WHERE expires_at < CURRENT_TIMESTAMP and paid is NULL;""")
# do callback for addresses that received payment or expired
cur.execute("""SELECT oid, address, paid from electrum_payments WHERE paid is not NULL and processed is NULL;""")
data = cur.fetchall()
for item in data:
oid, address, paid = item
paid = bool(paid)
headers = {'content-type':'application/json'}
data_json = { 'address':address, 'password':cb_password, 'paid':paid }
data_json = json.dumps(data_json)
url = received_url if paid else expired_url
if not url:
continue
req = urllib2.Request(url, data_json, headers)
try:
response_stream = urllib2.urlopen(req)
print 'Got Response for %s' % address
cur.execute("UPDATE electrum_payments SET processed=1 WHERE oid=%d;"%(oid))
except urllib2.HTTPError:
print "cannot do callback", data_json
except ValueError, e:
print e
print "cannot do callback", data_json
conn.commit()
conn.close()
print "database closed"
if __name__ == '__main__':
if len(sys.argv) > 1:
cmd = sys.argv[1]
params = sys.argv[2:] + [my_password]
ret = send_command(cmd, params)
sys.exit(ret)
# start network
c = electrum.SimpleConfig({'wallet_path':wallet_path})
daemon_socket = electrum.daemon.get_daemon(c,True)
network = electrum.NetworkProxy(daemon_socket,config)
network.start()
# wait until connected
while network.is_connecting():
time.sleep(0.1)
if not network.is_connected():
print_msg("daemon is not connected")
sys.exit(1)
# create watching_only wallet
storage = electrum.WalletStorage(c)
if not storage.file_exists:
print "creating wallet file"
wallet = electrum.wallet.Wallet.from_xpub(xpub, storage)
else:
wallet = electrum.wallet.Wallet(storage)
wallet.synchronize = lambda: None # prevent address creation by the wallet
wallet.start_threads(network)
network.register_callback('updated', on_wallet_update)
threading.Thread(target=db_thread, args=()).start()
out_queue = Queue.Queue()
# server thread
from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
server = SimpleJSONRPCServer(( my_host, my_port))
server.register_function(process_request, 'request')
server.register_function(do_dump, 'dump')
server.register_function(getrequest, 'getrequest')
server.register_function(do_stop, 'stop')
server.socket.settimeout(1)
while not stopping:
try:
server.handle_request()
except socket.timeout:
continue<|fim▁end|> | my_port = config.getint('main','port')
database = config.get('sqlite3','database')
|
<|file_name|>ListContainersCmdHeaderTest.java<|end_file_name|><|fim▁begin|>/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.docker.headers;
import java.util.Map;
import com.github.dockerjava.api.command.ListContainersCmd;
import org.apache.camel.component.docker.DockerConstants;
import org.apache.camel.component.docker.DockerOperation;
import org.junit.Test;
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.Mockito;
/**
* Validates List Containers Request headers are applied properly
*/
public class ListContainersCmdHeaderTest extends BaseDockerHeaderTest<ListContainersCmd> {
@Mock
private ListContainersCmd mockObject;
@Test
public void listContainerHeaderTest() {
boolean showSize = true;
boolean showAll = false;
int limit = 2;
String since = "id1";
String before = "id2";
Map<String, Object> headers = getDefaultParameters();
headers.put(DockerConstants.DOCKER_LIMIT, limit);
headers.put(DockerConstants.DOCKER_SHOW_ALL, showAll);
headers.put(DockerConstants.DOCKER_SHOW_SIZE, showSize);
headers.put(DockerConstants.DOCKER_SINCE, since);
headers.put(DockerConstants.DOCKER_BEFORE, before);
template.sendBodyAndHeaders("direct:in", "", headers);
Mockito.verify(dockerClient, Mockito.times(1)).listContainersCmd();
Mockito.verify(mockObject, Mockito.times(1)).withShowAll(Matchers.eq(showAll));
Mockito.verify(mockObject, Mockito.times(1)).withShowSize(Matchers.eq(showSize));
Mockito.verify(mockObject, Mockito.times(1)).withLimit(Matchers.eq(limit));<|fim▁hole|>
@Override
protected void setupMocks() {
Mockito.when(dockerClient.listContainersCmd()).thenReturn(mockObject);
}
@Override
protected DockerOperation getOperation() {
return DockerOperation.LIST_CONTAINERS;
}
}<|fim▁end|> | Mockito.verify(mockObject, Mockito.times(1)).withSince(Matchers.eq(since));
Mockito.verify(mockObject, Mockito.times(1)).withBefore(Matchers.eq(before));
} |
<|file_name|>mdn.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from collections import defaultdict
import json
import logging
from ._compat import ElementTree, urlopen
MDN_SITEMAP = 'https://developer.mozilla.org/sitemaps/en-US/sitemap.xml'
SITEMAP_NS = 'http://www.sitemaps.org/schemas/sitemap/0.9'
log = logging.getLogger(__name__)
def parse():
"""
Generate a cross-reference dictionary for the MDN JavaScript Reference.
:rtype: dict
"""
with urlopen(MDN_SITEMAP) as f:
xml = ElementTree.parse(f)
refs = defaultdict(dict)
for loc in xml.iterfind('{{{ns}}}url/{{{ns}}}loc'.format(ns=SITEMAP_NS)):
url = loc.text
if 'JavaScript/Reference/Global_Objects/' not in url:<|fim▁hole|> if len(parts) == 1:
name = parts[0]
if name[0].isupper():
ref_type = 'class'
else:
ref_type = 'data'
elif len(parts) == 2:
cls, attr = parts
with urlopen('{url}$json'.format(url=url)) as f:
metadata = json.loads(f.read().decode('utf-8'))
name = '{0}.{1}'.format(cls, attr)
if 'Method' in metadata['tags']:
ref_type = 'function'
elif 'Property' in metadata['tags']:
ref_type = 'attribute'
else:
fmt = 'Unknown ref_type for {0}. Tags: {1}'
log.warning(fmt.format(url, ', '.join(metadata['tags'])))
continue
else:
log.warning('Skipping URL (too many parts): {0}'.format(url))
continue
refs[ref_type][name] = url_suffix
return dict(refs)<|fim▁end|> | continue
url_suffix = url[81:]
parts = url_suffix.split('/') |
<|file_name|>v5.py<|end_file_name|><|fim▁begin|>from common2 import *
# NAME IDEA -> pooling/random/sparse/distributed hebbian/horde/crowd/fragment/sample memory
# FEATURES:
# + boost -- neurons with empty mem slots learn faster
# + noise -- randomize scores to prevent snowballing
# + dropout -- temporary disabling of neurons
# + decay -- remove from mem
# + negatives -- learning to avoid detecting some patterns
# + fatigue -- winner has lower score for some time
# ~ sklearn -- compatible api
# - prune -- if input < mem shrink mem ? (problem with m > input len)
# - weights -- sample weights for imbalanced classes
# - popularity -- most popular neuron is cloned / killed
# NEXT VERSION:
# - attention
# - https://towardsdatascience.com/the-fall-of-rnn-lstm-2d1594c74ce0
# - https://towardsdatascience.com/memory-attention-sequences-37456d271992
# - https://medium.com/breathe-publication/neural-networks-building-blocks-a5c47bcd7c8d
# - https://distill.pub/2016/augmented-rnns/
# - http://akosiorek.github.io/ml/2017/10/14/visual-attention.html
# + IDEA:
#   append activated neuron indexes to a queue available as input
# queue ages at constant rate and drops oldest values
# - IDEA:
# each neuron has small memory of activation prior to winning
# this memory is compared to ctx and intersection added to score
#   winner updates this memory
#   OPTION: several memories with different time frames
# NEXT VERSION:
# - layers -- rsm stacking
# NEXT VERSIONS:
# - numpy -- faster version
# - cython -- faster version
# - gpu -- faster version
# - distributed
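# Rough usage sketch of the sklearn-like API below (X and Y here are
# hypothetical lists of sparse binary feature sets and 0/1 labels):
#   m = rsm(100, 10, k=3, noise=True, boost=True)
#   m.fit(X, Y)
#   predictions = m.transform(X)
#   print m.score(X, Y, kind='f1')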
class rsm:
def __init__(self,n,m,c=0,**kw):
"""Random Sample Memory
n -- number of neurons
m -- max connections per neuron (memory)
"""
self.mem = {j:set() for j in range(n)}
self.win = {j:0 for j in range(n)}
self.tow = {j:-42000 for j in range(n)} # time of win
self.t = 0
self.ctx = deque(maxlen=c) # context queue
# cfg
cfg = {}
cfg['n'] = n
cfg['m'] = m
cfg['c'] = c
cfg['k'] = kw.get('k',1)
cfg['method'] = kw.get('method',1)
cfg['cutoff'] = kw.get('cutoff',0.5)
cfg['decay'] = kw.get('decay',0.0)
cfg['dropout'] = kw.get('dropout',0.0)
cfg['fatigue'] = kw.get('fatigue',0)
cfg['boost'] = kw.get('boost',True)
cfg['noise'] = kw.get('noise',True)
cfg['sequence'] = kw.get('sequence',False)
cfg.update(kw)
self.cfg = cfg
# ---[ core ]---------------------------------------------------------------
def new_ctx(self):
self.ctx.clear()
# TODO -- input length vs mem length
# TODO -- args from cfg
def scores(self, input, raw=False, boost=False, noise=False, fatigue=0, dropout=0.0, **ignore): # -> dict[i] -> scores
"""
input -- sparse binary features
raw -- disable all postprocessing
boost -- improve scores based on number of unconnected synapses (TODO)
noise -- randomize scores to prevent snowballing
        fatigue -- winners score lower for a while after winning
        dropout -- temporary disabling of neurons
"""
mem = self.mem
tow = self.tow
N = self.cfg['n']
M = self.cfg['m']
t = self.t
scores = {}
for j in mem:
scores[j] = len(set(input) & mem[j])
if raw:
return scores
if noise:
for j in mem:
scores[j] += 0.9*random()
if boost:
for j in mem:
scores[j] += 1+2*(M-len(mem[j])) if len(mem[j])<M else 0
# TODO boost also based on low win ratio / low tow
if fatigue:
for j in mem:
dt = 1.0*min(fatigue,t - tow[j])
factor = dt / fatigue
scores[j] *= factor
if dropout:
k = int(round(float(dropout)*N))
for j in combinations(N,k):
scores[j] = -1
return scores
def learn(self, input, negative=False, **ignore):
for i in range(0,len(input),10):
self.learn_(set(input[i:i+10]),negative=negative)
def learn_(self, input, negative=False, **ignore):
"""
input -- sparse binary features
k -- number of winning neurons
"""
mem = self.mem
win = self.win
tow = self.tow
ctx = self.ctx
t = self.t
cfg = self.cfg
M = self.cfg['m']
N = self.cfg['n']
k = self.cfg['k']
decay = self.cfg['decay']
sequence = self.cfg['sequence']
known_inputs = set()
for j in mem:
known_inputs.update(mem[j])
# context
input = input | set(ctx)
# scoring
scores = self.scores(input, **cfg)
winners = top(k,scores)
for j in winners:
# negative learning
if negative:
mem[j].difference_update(input)
continue
# positive learning
unknown_inputs = input - known_inputs
mem[j].update(pick(unknown_inputs, M-len(mem[j])))
known_inputs.update(mem[j])
# handle decay
if decay:
decay_candidates = mem[j] - input
if decay_candidates:
for d in decay_candidates:
if random() < decay:
mem[j].remove(d)
# handle popularity
win[j] += 1
# handle fatigue
tow[j] = t
# handle context
if sequence:
for i in range(len(ctx)):
ctx[i] -= N
for j in winners:
ctx.append(-j-1)
self.t += 1
# ---[ auxiliary ]----------------------------------------------------------<|fim▁hole|> negative = not y
self.learn(x,negative=negative,**cfg)
def fit2(self, X1, X0):
cfg = self.cfg
# TODO - unbalanced
for x1,x0 in zip(X1,X0):
self.learn(x1,negative=False,**cfg)
self.learn(x0,negative=True,**cfg)
def transform(self, X):
cutoff = self.cfg['cutoff']
out = []
for s in self.score_many(X):
y = 1 if s>=cutoff else 0
out += [y]
return out
def fit_transform(self, X, Y):
self.fit(X,Y)
return self.transform(X)
def score(self, X, Y, kind='acc'):
c = self.confusion(X,Y)
p = float(c['p'])
n = float(c['n'])
tp = float(c['tp'])
tn = float(c['tn'])
fp = float(c['fp'])
fn = float(c['fn'])
try:
if kind=='acc':
return (tp + tn) / (p + n)
elif kind=='f1':
return (2*tp) / (2*tp + fp + fn)
elif kind=='prec':
return tp / (tp + fp)
elif kind=='sens':
return tp / (tp + fn)
elif kind=='spec':
return tn / (tn + fp)
except ZeroDivisionError:
return float('nan')
def confusion(self, X, Y):
PY = self.transform(X)
p = 0
n = 0
tp = 0
tn = 0
fp = 0
fn = 0
for y,py in zip(Y,PY):
if y: p+=1
else: n+=1
if y:
if py: tp+=1
else: fn+=1
else:
if py: fp+=1
else: tn+=1
return dict(p=p,n=n,tp=tp,tn=tn,fp=fp,fn=fn)
def score_many(self, X):
out = []
for x in X:
s = self.score_one(x)
out += [s]
return out
# TODO
def calibrate(self, X, Y, kind='f1'):
for i in range(1,20):
c = 0.05*i
self.set_params(cutoff=c)
s = self.score(X,Y,kind)
print'{} {:.3} -> {:.3}'.format(kind,c,s)
def score_one(self, input):
"aggregate scores to scalar"
k = self.cfg['k']
method = self.cfg['method']
scores = self.scores(input)
M = self.cfg['m']
if method==0:
return top(k, scores, values=True)
elif method==1:
score = 1.0*sum(top(k, scores, values=True))/(k*(M+1))
return score
elif method==2:
score = 1.0*sum(top(k, scores, values=True))/(k*M)
return min(1.0,score)
if method==3:
score = 1.0*min(top(k, scores, values=True))/(M+1)
return score
elif method==4:
score = 1.0*min(top(k, scores, values=True))/M
return min(1.0,score)
if method==5:
score = 1.0*max(top(k, scores, values=True))/(M+1)
return score
elif method==6:
score = 1.0*max(top(k, scores, values=True))/M
return min(1.0,score)
def stats(self,prefix=''):
N = self.cfg['n']
M = self.cfg['m']
mem_v = self.mem.values()
out = {}
# mem
out['mem_empty'] = sum([1.0 if len(x)==0 else 0.0 for x in mem_v])/N
out['mem_not_empty'] = sum([1.0 if len(x)>0 else 0.0 for x in mem_v])/N
out['mem_full'] = sum([1.0 if len(x)==M else 0.0 for x in mem_v])/N
out['mem_avg'] = sum([1.0*len(x) for x in mem_v])/(N*M)
# win
win = list(sorted(self.win.values()))
out['win_min'] = win[0]
out['win_max'] = win[-1]
gini = 0
for a in win:
for b in win:
gini += abs(a-b)
gini = float(gini)/(2.0*len(win)*sum(win))
out['win_gini'] = round(gini,3)
# ctx
out['ctx_mem_sum'] = sum([1 if x<0 else 0 for m in mem_v for x in m])
out['ctx_mem_cnt'] = sum([max([1 if x<0 else 0 for x in m]) for m in mem_v if m])
out['ctx_mem_max'] = max([sum([1 if x<0 else 0 for x in m]) for m in mem_v if m])
#
return {k:v for k,v in out.items() if k.startswith(prefix)}
def set_params(self,**kw):
self.cfg.update(kw)
# TODO: deep parameter
def get_params(self,deep=True):
return self.cfg # TODO copy ???<|fim▁end|> |
def fit(self, X, Y):
cfg = self.cfg
for x,y in zip(X,Y): |
<|file_name|>uri.py<|end_file_name|><|fim▁begin|># vi: ts=8 sts=4 sw=4 et
#
# uri.py: various URI related utilties
#
# This file is part of Draco2. Draco2 is free software and is made available
# under the MIT license. Consult the file "LICENSE" that is distributed
# together with this file for the exact licensing terms.
#
# Draco2 is copyright (c) 1999-2007 by the Draco2 authors. See the file
# "AUTHORS" for a complete overview.
#
# $Revision: 1187 $
import os
import os.path
import re
import stat
# URL/Form encoding
safe_chars = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'abcdefghijklmnopqrstuvwxyz'
'0123456789' '_.-')
def quote_hex(s, safe=''):
"""
Replace potentially unsafe characters in 's' with their %XX hexadecimal
counterparts. You can pass additional safe characters in `safe'.
"""
res = list(s)
safe += safe_chars
for i in range(len(s)):
c = res[i]
if c not in safe:
res[i] = '%%%02X' % ord(c)
return ''.join(res)
def unquote_hex(s):
"""
Change %XX occurences in `s' with their character value.
Does the opposite of quote_url().
"""
lst = s.split('%')
res = [lst[0]]
for s in lst[1:]:
if len(s) >= 2:
try:
c = chr(int(s[:2], 16))
res.append(c + s[2:])
except ValueError:
res.append('%' + s)
else:
res.append('%' + s)
return ''.join(res)
def quote_url(s):
"""URL encode a string."""
if isinstance(s, unicode):
s = s.encode('utf-8')
return quote_hex(s, '/')
def unquote_url(s):
"""Decode an URL encoded string."""
s = unquote_hex(s)
s = s.decode('utf-8')
return s
def quote_form(s):
"""Form encode a string."""
if isinstance(s, unicode):
s = s.encode('utf-8')
s = quote_hex(s, ' ')
s = s.replace(' ', '+')
return s
def unquote_form(s):
"""Decode a form encoded string."""
s = s.replace('+', ' ')
s = unquote_hex(s)
s = s.decode('utf-8')
return s
# URI parsing
re_uri = re.compile('(?:([^:/?]*):)?(?://([^?/]*))?(?:/?([^?]*))(?:\?(.*))?')
def parse_uri(uri):
"""Parse an URI into its components.
The result is a 4-tuple (scheme, host, path, query).
Note: This function only supports the "hier_part" URL format as
defined in RFC2396 section 3. The "opaque_part" format is not
supported.
"""
mobj = re_uri.match(uri)
assert mobj
result = list(mobj.groups())
for i,value in enumerate(result):
if result[i] is None:
result[i] = ''
return tuple(result)
def create_uri(scheme=None, host=None, path=None, query=None):
"""Create an URI from its components."""
uri = ''
if scheme:
uri += '%s:' % scheme
if host:
uri += '//%s' % host
if path:
uri += '/%s' % path
if query:
uri += '?%s' % query
return uri
def parse_path(path):
"""Parse the "path" component of an URI.
The result is a list of path components.
"""
parts = [ unquote_url(pa) for pa in path.split('/') if pa ]
return parts
def create_path(parts):
"""Create a "path" component of an URI.
This function is the reverse of parse_path().
"""
parts = [ quote_url(pa) for pa in parts ]
path = '/'.join(parts)
return path
def parse_query(query):
"""Parse the "query" component of an URI.
The result is a dictionary that maps a string key to a list with
one or more string values.
"""
args = {}
parts = query.split('&')
for pa in parts:
try:
name, value = pa.split('=')
except ValueError:
continue
name = unquote_form(name)
value = unquote_form(value)
try:
args[name].append(value)
except KeyError:
args[name] = [value]
return args
def create_query(args):
"""Create the "query" component of an URI.
This function is the reverse of parse_query().
"""
args = [ '%s=%s' % (quote_form(key), quote_form(value))
for key,value in args.items() ]
query = '&'.join(args)<|fim▁hole|>
# URL path resolution
class ResolutionError(Exception):
pass
def resolve_path_uri(path, docroot):
"""Resolves the path part of an URI.
The URI is resolved to the 3-tuple: directory, filename, pathinfo.
The filename component is either empty or a single path component,
and may or may not exist as a physical file. The pathinfo component
consists of zero or more path components.
"""
try:
st = os.stat(docroot)
except OSError:
st = None
if st is None or not stat.S_ISDIR(st.st_mode):
raise ResolutionError, 'Document root does not exist.'
directory = []
subdir = docroot
parts = [ unquote_url(part) for part in path.split('/') if part ]
for i in range(len(parts)):
part = parts[i]
if part in ('.', '..'):
raise ResolutionError, \
'Current or parent directory not allowed in URI.'
subdir = os.path.join(subdir, part)
try:
st = os.stat(subdir)
except OSError:
st = None
if st is None or not stat.S_ISDIR(st.st_mode):
filename = parts[i]
pathinfo = '/'.join(parts[i+1:])
break
directory.append(part)
else:
filename = ''
pathinfo = ''
directory = '/'.join(directory)
return (directory, filename, pathinfo)
def create_path_uri(directory, filename, pathinfo):
"""Create a path URI from a 3-tuple (directory, filename, pathinfo)."""
parts = []
if directory:
parts.append(directory)
if filename:
parts.append(filename)
if pathinfo:
parts += [ part for part in pathinfo.split('/') if part ]
parts = [ quote_url(part) for part in parts ]
path = '/'.join(parts)
return path<|fim▁end|> | return query
|
<|file_name|>safemap_test.go<|end_file_name|><|fim▁begin|>// Copyright © 2011-12 Qtrac Ltd.
//
// This program or package and any associated files are licensed under the
// Apache License, Version 2.0 (the "License"); you may not use these files
// except in compliance with the License. You can get a copy of the License
// at: http://www.apache.org/licenses/LICENSE-2.0.
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package safemap_test
import (
"fmt"
"safemap"
"sync"
"testing"
)<|fim▁hole|>
func TestSafeMap(t *testing.T) {
store := safemap.New()
fmt.Printf("Initially has %d items\n", store.Len())
deleted := []int{0, 2, 3, 5, 7, 20, 399, 25, 30, 1000, 91, 97, 98, 99}
var waiter sync.WaitGroup
waiter.Add(1)
go func() { // Concurrent Inserter
for i := 0; i < 100; i++ {
store.Insert(fmt.Sprintf("0x%04X", i), i)
if i > 0 && i%15 == 0 {
fmt.Printf("Inserted %d items\n", store.Len())
}
}
fmt.Printf("Inserted %d items\n", store.Len())
waiter.Done()
}()
waiter.Add(1)
go func() { // Concurrent Deleter
for _, i := range deleted {
key := fmt.Sprintf("0x%04X", i)
before := store.Len()
store.Delete(key)
fmt.Printf("Deleted m[%s] (%d) before=%d after=%d\n",
key, i, before, store.Len())
}
waiter.Done()
}()
waiter.Add(1)
go func() { // Concurrent Finder
for _, i := range deleted {
for _, j := range []int{i, i + 1} {
key := fmt.Sprintf("0x%04X", j)
value, found := store.Find(key)
if found {
fmt.Printf("Found m[%s] == %d\n", key, value)
} else {
fmt.Printf("Not found m[%s] (%d)\n", key, j)
}
}
}
waiter.Done()
}()
waiter.Wait()
updater := func(value interface{}, found bool) interface{} {
if found {
return value.(int) * 1000
}
return 1
}
for _, i := range []int{5, 10, 15, 20, 25, 30, 35} {
key := fmt.Sprintf("0x%04X", i)
if value, found := store.Find(key); found {
fmt.Printf("Original m[%s] == %d\t", key, value)
store.Update(key, updater)
if value, found := store.Find(key); found {
fmt.Printf("Updated m[%s] == %5d\n", key, value)
}
}
}
fmt.Printf("Finished with %d items\n", store.Len())
// not needed here but useful if you want to free up the goroutine
data := store.Close()
fmt.Println("Closed")
fmt.Printf("len == %d\n", len(data))
//for k, v := range data { fmt.Printf("%s = %v\n", k, v) }
}<|fim▁end|> | |
<|file_name|>hg-to-git.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
""" hg-to-git.py - A Mercurial to GIT converter
Copyright (C)2007 Stelian Pop <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""
import os, os.path, sys
import tempfile, pickle, getopt
import re
if sys.hexversion < 0x02030000:
# The behavior of the pickle module changed significantly in 2.3
sys.stderr.write("hg-to-git.py: requires Python 2.3 or later.\n")
sys.exit(1)
# Maps hg version -> git version
hgvers = {}
# List of children for each hg revision
hgchildren = {}
# List of parents for each hg revision
hgparents = {}
# Current branch for each hg revision
hgbranch = {}
# Number of new changesets converted from hg
hgnewcsets = 0
#------------------------------------------------------------------------------
def usage():
print """\
%s: [OPTIONS] <hgprj>
options:
-s, --gitstate=FILE: name of the state to be saved/read
for incrementals
-n, --nrepack=INT: number of changesets that will trigger
a repack (default=0, -1 to deactivate)
-v, --verbose: be verbose
required:
hgprj: name of the HG project to import (directory)
""" % sys.argv[0]
#------------------------------------------------------------------------------
def getgitenv(user, date):
env = ''
elems = re.compile('(.*?)\s+<(.*)>').match(user)
if elems:
env += 'export GIT_AUTHOR_NAME="%s" ;' % elems.group(1)
env += 'export GIT_COMMITTER_NAME="%s" ;' % elems.group(1)
env += 'export GIT_AUTHOR_EMAIL="%s" ;' % elems.group(2)
env += 'export GIT_COMMITTER_EMAIL="%s" ;' % elems.group(2)
else:
env += 'export GIT_AUTHOR_NAME="%s" ;' % user
env += 'export GIT_COMMITTER_NAME="%s" ;' % user
env += 'export GIT_AUTHOR_EMAIL= ;'
env += 'export GIT_COMMITTER_EMAIL= ;'
env += 'export GIT_AUTHOR_DATE="%s" ;' % date
env += 'export GIT_COMMITTER_DATE="%s" ;' % date
return env
#------------------------------------------------------------------------------
state = ''
opt_nrepack = 0
verbose = False
try:
opts, args = getopt.getopt(sys.argv[1:], 's:t:n:v', ['gitstate=', 'tempdir=', 'nrepack=', 'verbose'])
for o, a in opts:
if o in ('-s', '--gitstate'):
state = a
state = os.path.abspath(state)
if o in ('-n', '--nrepack'):
opt_nrepack = int(a)
if o in ('-v', '--verbose'):
verbose = True
if len(args) != 1:
raise Exception('params')
except:
usage()
sys.exit(1)
hgprj = args[0]
os.chdir(hgprj)
if state:
if os.path.exists(state):
if verbose:
print 'State does exist, reading'
f = open(state, 'r')
hgvers = pickle.load(f)
else:
print 'State does not exist, first run'<|fim▁hole|>if sock.close():
sys.exit(1)
if verbose:
print 'tip is', tip
# Calculate the branches
if verbose:
print 'analysing the branches...'
hgchildren["0"] = ()
hgparents["0"] = (None, None)
hgbranch["0"] = "master"
for cset in range(1, int(tip) + 1):
hgchildren[str(cset)] = ()
prnts = os.popen('hg log -r %d --template "{parents}"' % cset).read().strip().split(' ')
prnts = map(lambda x: x[:x.find(':')], prnts)
if prnts[0] != '':
parent = prnts[0].strip()
else:
parent = str(cset - 1)
hgchildren[parent] += ( str(cset), )
if len(prnts) > 1:
mparent = prnts[1].strip()
hgchildren[mparent] += ( str(cset), )
else:
mparent = None
hgparents[str(cset)] = (parent, mparent)
if mparent:
# For merge changesets, take either one, preferably the 'master' branch
if hgbranch[mparent] == 'master':
hgbranch[str(cset)] = 'master'
else:
hgbranch[str(cset)] = hgbranch[parent]
else:
# Normal changesets
# For first children, take the parent branch, for the others create a new branch
if hgchildren[parent][0] == str(cset):
hgbranch[str(cset)] = hgbranch[parent]
else:
hgbranch[str(cset)] = "branch-" + str(cset)
if not hgvers.has_key("0"):
print 'creating repository'
os.system('git init')
# loop through every hg changeset
for cset in range(int(tip) + 1):
# incremental, already seen
if hgvers.has_key(str(cset)):
continue
hgnewcsets += 1
# get info
log_data = os.popen('hg log -r %d --template "{tags}\n{date|date}\n{author}\n"' % cset).readlines()
tag = log_data[0].strip()
date = log_data[1].strip()
user = log_data[2].strip()
parent = hgparents[str(cset)][0]
mparent = hgparents[str(cset)][1]
#get comment
(fdcomment, filecomment) = tempfile.mkstemp()
csetcomment = os.popen('hg log -r %d --template "{desc}"' % cset).read().strip()
os.write(fdcomment, csetcomment)
os.close(fdcomment)
print '-----------------------------------------'
print 'cset:', cset
print 'branch:', hgbranch[str(cset)]
print 'user:', user
print 'date:', date
print 'comment:', csetcomment
if parent:
print 'parent:', parent
if mparent:
print 'mparent:', mparent
if tag:
print 'tag:', tag
print '-----------------------------------------'
# checkout the parent if necessary
if cset != 0:
if hgbranch[str(cset)] == "branch-" + str(cset):
print 'creating new branch', hgbranch[str(cset)]
os.system('git checkout -b %s %s' % (hgbranch[str(cset)], hgvers[parent]))
else:
print 'checking out branch', hgbranch[str(cset)]
os.system('git checkout %s' % hgbranch[str(cset)])
# merge
if mparent:
if hgbranch[parent] == hgbranch[str(cset)]:
otherbranch = hgbranch[mparent]
else:
otherbranch = hgbranch[parent]
print 'merging', otherbranch, 'into', hgbranch[str(cset)]
os.system(getgitenv(user, date) + 'git merge --no-commit -s ours "" %s %s' % (hgbranch[str(cset)], otherbranch))
# remove everything except .git and .hg directories
os.system('find . \( -path "./.hg" -o -path "./.git" \) -prune -o ! -name "." -print | xargs rm -rf')
    # repopulate with checked-out files
os.system('hg update -C %d' % cset)
# add new files
os.system('git ls-files -x .hg --others | git update-index --add --stdin')
# delete removed files
os.system('git ls-files -x .hg --deleted | git update-index --remove --stdin')
# commit
os.system(getgitenv(user, date) + 'git commit --allow-empty -a -F %s' % filecomment)
os.unlink(filecomment)
# tag
if tag and tag != 'tip':
os.system(getgitenv(user, date) + 'git tag %s' % tag)
# delete branch if not used anymore...
if mparent and len(hgchildren[str(cset)]):
print "Deleting unused branch:", otherbranch
os.system('git branch -d %s' % otherbranch)
# retrieve and record the version
vvv = os.popen('git show --quiet --pretty=format:%H').read()
print 'record', cset, '->', vvv
hgvers[str(cset)] = vvv
if hgnewcsets >= opt_nrepack and opt_nrepack != -1:
os.system('git repack -a -d')
# write the state for incrementals
if state:
if verbose:
print 'Writing state'
f = open(state, 'w')
pickle.dump(hgvers, f)
# vim: et ts=8 sw=4 sts=4<|fim▁end|> |
sock = os.popen('hg tip --template "{rev}"')
tip = sock.read() |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>pub use self::d3d11_0::*;
mod d3d11_0;
#[cfg(feature = "d3d11_1")]
pub use self::d3d11_1::*;
#[cfg(feature = "d3d11_1")]
mod d3d11_1;
#[cfg(feature = "d3d11_2")]
pub use self::d3d11_2::*;
#[cfg(feature = "d3d11_2")]
mod d3d11_2;
<|fim▁hole|>mod d3d11_3;<|fim▁end|> | #[cfg(feature = "d3d11_3")]
pub use self::d3d11_3::*;
#[cfg(feature = "d3d11_3")] |
<|file_name|>ItemCompShovel.java<|end_file_name|><|fim▁begin|>package com.iblowuptnt.tntMods.item;
import com.google.common.collect.Sets;
import com.iblowuptnt.tntMods.creativetab.CreativeTab;
import com.iblowuptnt.tntMods.reference.Material;
import com.iblowuptnt.tntMods.reference.Name;
import com.iblowuptnt.tntMods.reference.Textures;
import cpw.mods.fml.relauncher.Side;
import cpw.mods.fml.relauncher.SideOnly;<|fim▁hole|>import net.minecraft.item.ItemSpade;
import net.minecraft.init.Blocks;
import net.minecraft.item.ItemStack;
import java.util.Set;
public class ItemCompShovel extends ItemSpade
{
public ItemCompShovel()
{
super(Material.Tools.COMP_DIAMOND);
this.setCreativeTab(CreativeTab.TNT_TAB);
this.setUnlocalizedName(Name.Tools.COMP_SHOVEL);
this.maxStackSize = 1;
}
protected String getUnwrappedUnlocalizedName(String unlocalizedName)
{
return unlocalizedName.substring(unlocalizedName.indexOf(".") + 1);
}
@Override
public String getUnlocalizedName()
{
return String.format("item.%s%s", Textures.MOD_ID_PREFIX, getUnwrappedUnlocalizedName(super.getUnlocalizedName()));
}
@Override
public String getUnlocalizedName(ItemStack itemStack)
{
return String.format("item.%s%s", Textures.MOD_ID_PREFIX, getUnwrappedUnlocalizedName(super.getUnlocalizedName()));
}
@Override
@SideOnly(Side.CLIENT)
public void registerIcons(IIconRegister iconRegister)
{
itemIcon = iconRegister.registerIcon(this.getUnlocalizedName().substring(this.getUnlocalizedName().indexOf(".") + 1));
}
}<|fim▁end|> | import net.minecraft.block.Block;
import net.minecraft.client.renderer.texture.IIconRegister; |
<|file_name|>cc_salt_minion.py<|end_file_name|><|fim▁begin|># vi: ts=4 expandtab
#
# Copyright (C) 2014 Amazon.com, Inc. or its affiliates.
#
# Author: Jeff Bauer <[email protected]>
# Author: Andrew Jorgensen <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from cloudinit import util
# Note: see http://saltstack.org/topics/installation/
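# An illustrative cloud-config snippet this module understands (keys match
# the salt_cfg lookups below; values are placeholders):
#
#   salt_minion:
#     conf:
#       master: salt.example.com
#     public_key: |
#       ...
#     private_key: |
#       ...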
def handle(name, cfg, cloud, log, _args):
# If there isn't a salt key in the configuration don't do anything
if 'salt_minion' not in cfg:
log.debug(("Skipping module named %s,"
" no 'salt_minion' key in configuration"), name)
return
salt_cfg = cfg['salt_minion']
# Start by installing the salt package ...
cloud.distro.install_packages(('salt-minion',))
# Ensure we can configure files at the right dir
config_dir = salt_cfg.get("config_dir", '/etc/salt')
util.ensure_dir(config_dir)
# ... and then update the salt configuration
if 'conf' in salt_cfg:
# Add all sections from the conf object to /etc/salt/minion
minion_config = os.path.join(config_dir, 'minion')
minion_data = util.yaml_dumps(salt_cfg.get('conf'))
util.write_file(minion_config, minion_data)
# ... copy the key pair if specified
if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
pki_dir = salt_cfg.get('pki_dir', '/etc/salt/pki')
with util.umask(077):
util.ensure_dir(pki_dir)
pub_name = os.path.join(pki_dir, 'minion.pub')
pem_name = os.path.join(pki_dir, 'minion.pem')
util.write_file(pub_name, salt_cfg['public_key'])
util.write_file(pem_name, salt_cfg['private_key'])
# start / restart salt-minion. if it was started, it needs to be restarted<|fim▁hole|> if cloud.distro.service_running('salt-minion'):
cloud.distro.service_control('salt-minion', 'restart', capture=False)
else:
cloud.distro.service_control('salt-minion', 'start', capture=False)<|fim▁end|> | # for config change. |
<|file_name|>setting.controller.js<|end_file_name|><|fim▁begin|>import Setting from '../models/setting';<|fim▁hole|>export function getSettings(req, res) {
Setting.find({ disable: false }).exec((err, settings) => {
if(err) {
res.json({ settings: [] });
} else {
res.json({ settings });
}
})
}<|fim▁end|> | import KhongDau from 'khong-dau';
|
<|file_name|>photos.js<|end_file_name|><|fim▁begin|>/* global $:true */
+ function($) {
var defaults;
var Photos = function(config) {
this.initConfig(config);
this.index = 0;
}
Photos.prototype = {
initConfig: function (config) {
this.config = $.extend({}, defaults, config);
this.activeIndex = this.lastActiveIndex = this.config.initIndex;
this.config.items = this.config.items.map(function(d, i) {
if(typeof d === typeof 'a') {
return {
image: d,
caption: ''
}
}
return d;
});
this.tpl = $.t7.compile(this.config.tpl);
if(this.config.autoOpen) this.open();
},
open: function (index) {
if (this._open) return false;
if (!this.modal) {
this.modal = $(this.tpl(this.config)).appendTo(document.body);
this.container = this.modal.find('.swiper-container');
this.wrapper = this.modal.find('.swiper-wrapper');
var hammer = new Hammer(this.container[0]);
hammer.get('pinch').set({ enable: true });
hammer.on('pinchstart', $.proxy(this.onGestureStart, this));
hammer.on('pinchmove', $.proxy(this.onGestureChange, this));
hammer.on('pinchend', $.proxy(this.onGestureEnd, this));
this.modal.on($.touchEvents.start, $.proxy(this.onTouchStart, this));
this.modal.on($.touchEvents.move, $.proxy(this.onTouchMove, this));
this.modal.on($.touchEvents.end, $.proxy(this.onTouchEnd, this));
//init index
this.wrapper.transition(0);
this.wrapper.transform('translate3d(-' + $(window).width()*this.config.initIndex + 'px,0,0)');
this.container.find('.caption-item').eq(this.config.initIndex).addClass('active');
this.container.find('.swiper-pagination-bullet').eq(this.config.initIndex).addClass('swiper-pagination-bullet-active');
}
var self = this;
this.modal.show().height();
this.modal.addClass('weui-photo-browser-modal-visible');
this.container.addClass('swiper-container-visible').transitionEnd(function() {
self.initParams();
if(index !== undefined) {
self.slideTo(index);
}
if(self.config.onOpen) {
self.config.onOpen.call(self);
}
});
this._open = true;
},
close: function() {
this.container.transitionEnd($.proxy(function() {
this.modal.hide();
this._open = false;
if(this.config.onClose) this.config.onClose.call(this);
}, this));
this.container.removeClass('swiper-container-visible');
this.modal.removeClass('weui-photo-browser-modal-visible');
},
initParams: function () {
if(this.containerHeight) return false;
this.windowWidth = $(window).width();
this.containerHeight = this.container.height();
this.containerWidth = this.container.width();
this.touchStart = {};
this.wrapperTransform = 0;
this.wrapperLastTransform = - $(window).width()*this.config.initIndex;
this.wrapperDiff = 0;
this.lastScale = 1;
this.currentScale = 1;
this.imageLastTransform = { x: 0, y: 0 };
this.imageTransform = { x: 0, y: 0 };
this.imageDiff = { x: 0, y: 0 };
this.imageLastDiff = { x: 0, y: 0 };
},
onTouchStart: function (e) {
if(this.scaling) return false;
this.touching = true;
this.touchStart = $.getTouchPosition(e);
this.touchMove = null;
this.touchStartTime = + new Date;
this.wrapperDiff = 0;
this.breakpointPosition = null;
},
onTouchMove: function (e) {
if(!this.touching || this.scaling) return false;
e.preventDefault();
if(this.gestureImage) {
var rect = this.gestureImage[0].getBoundingClientRect();
if (rect.left >= 0 || rect.right <= this.windowWidth) {
this.overflow = true;
} else {
this.overflow = false;
}
} else {
        this.overflow = false;
}
var p = this.touchMove = $.getTouchPosition(e);
if(this.currentScale === 1 || this.overflow) {
if(this.breakpointPosition) {
this.wrapperDiff = p.x - this.breakpointPosition.x;
} else {
this.wrapperDiff = p.x - this.touchStart.x;
}
if(this.activeIndex === 0 && this.wrapperDiff > 0) this.wrapperDiff = Math.pow(this.wrapperDiff, .8);
if(this.activeIndex === this.config.items.length - 1 && this.wrapperDiff < 0) this.wrapperDiff = - Math.pow(-this.wrapperDiff, .8);
this.wrapperTransform = this.wrapperLastTransform + this.wrapperDiff;
this.doWrapperTransform();
} else {
var img = this.gestureImage;
this.imageDiff = {
x: p.x - this.touchStart.x,
y: p.y - this.touchStart.y
}
this.imageTransform = {
x: this.imageDiff.x + this.imageLastTransform.x,
y: this.imageDiff.y + this.imageLastTransform.y
};
this.doImageTransform();
this.breakpointPosition = p;
this.imageLastDiff = this.imageDiff;
}
},
onTouchEnd: function (e) {
if(!this.touching) return false;
this.touching = false;
if(this.scaling) return false;
var duration = (+ new Date) - this.touchStartTime;
if(duration < 200 && (!this.touchMove || Math.abs(this.touchStart.x - this.touchMove.x) <= 2 && Math.abs(this.touchStart.y - this.touchMove.y) <= 2)) {
this.onClick();
return;
}
if(this.wrapperDiff > 0) {
if(this.wrapperDiff > this.containerWidth/2 || (this.wrapperDiff > 20 && duration < 300)) {
this.slidePrev();
} else {
this.slideTo(this.activeIndex, 200);
}
} else {
if(- this.wrapperDiff > this.containerWidth/2 || (-this.wrapperDiff > 20 && duration < 300)) {
this.slideNext();
} else {
this.slideTo(this.activeIndex, 200);
}
}
this.imageLastTransform = this.imageTransform;
this.adjust();
},
onClick: function () {
var self = this;
if (this._lastClickTime && ( + new Date - this._lastClickTime < 300)) {
this.onDoubleClick();
clearTimeout(this._clickTimeout);
} else {
this._clickTimeout = setTimeout(function () {
self.close();
}, 300);
}
this._lastClickTime = + new Date;
},
onDoubleClick: function () {
this.gestureImage = this.container.find('.swiper-slide').eq(this.activeIndex).find('img');
this.currentScale = this.currentScale > 1 ? 1 : 2;
this.doImageTransform(200);
this.adjust();
},
onGestureStart: function (e) {
this.scaling = true;
this.gestureImage = this.container.find('.swiper-slide').eq(this.activeIndex).find('img');
},
onGestureChange: function (e) {
var s = this.lastScale * e.scale;
if (s > this.config.maxScale) {
s = this.config.maxScale + Math.pow((s - this.config.maxScale), 0.5);
} else if (s < 1) {
s = Math.pow(s, .5);
}
this.currentScale = s;
this.doImageTransform();
},
onGestureEnd: function (e) {
if (this.currentScale > this.config.maxScale) {
this.currentScale = this.config.maxScale;
this.doImageTransform(200);
} else if (this.currentScale < 1) {
this.currentScale = 1;
this.doImageTransform(200);
}
this.lastScale = this.currentScale;
this.scaling = false;
this.adjust();
},
doWrapperTransform: function(duration, callback) {
if (duration === 0) {
        var origin = this.wrapper.css('transition-property');
this.wrapper.css('transition-property', 'none').transform('translate3d(' + this.wrapperTransform + 'px, 0, 0)');
this.wrapper.css('transition-property', origin);
        callback && callback();
} else {
this.wrapper.transitionEnd(function() {
callback && callback();
});
this.wrapper.transition(duration || defaults.duration).transform('translate3d(' + this.wrapperTransform + 'px, 0, 0)');
}
},
doImageTransform: function(duration, callback) {
if(!this.gestureImage) return;
this.gestureImage.transition(duration || 0).transform('translate3d(' + this.imageTransform.x + 'px,' + this.imageTransform.y + 'px, 0) scale(' + this.currentScale + ')');
this._needAdjust = true;
},
adjust: function() {
if(!this._needAdjust) return false;
var img = this.gestureImage;
if(!img) return false;
if(this.currentScale === 1) {
this.imageTransform = this.imageLastDiff = {x:0,y:0};
this.doImageTransform(200);
return;
}
var rect = img[0].getBoundingClientRect();
      // adjust the vertical position
      if(rect.height < this.containerHeight) { // if the image is shorter than the container, center it vertically
        this.imageTransform.y = this.imageLastTransform.y = 0;
      } else { // if it is taller than the container, make sure there is no gap at the top or bottom
if(rect.top > 0) this.imageTransform.y = this.imageTransform.y - rect.top;
else if(rect.bottom < this.containerHeight) this.imageTransform.y = this.imageTransform.y + this.containerHeight - rect.bottom;
}
this.doImageTransform(200);
      this._needAdjust = false; // must stay the last line, because doImageTransform sets this._needAdjust back to true
},
slideTo: function(index, duration) {
if(index < 0) index = 0;
if(index > this.config.items.length-1) index = this.config.items.length - 1;
this.lastActiveIndex = this.activeIndex;
this.activeIndex = index;
this.wrapperTransform = - (index * this.containerWidth);
this.wrapperLastTransform = this.wrapperTransform;
this.doWrapperTransform(duration, $.proxy(function() {
if(this.lastActiveIndex === this.activeIndex) return false; // active index not change
this.container.find('.caption-item.active').removeClass('active');
this.container.find('.swiper-slide-active').removeClass('swiper-slide-active');
this.container.find('.swiper-pagination-bullet-active').removeClass('swiper-pagination-bullet-active');
this.container.find('.caption-item').eq(this.activeIndex).addClass('active');
this.container.find('.swiper-slide').eq(this.activeIndex).addClass('swiper-slide-active');
this.container.find('.swiper-pagination-bullet').eq(this.activeIndex).addClass('swiper-pagination-bullet-active');
//reset image transform
this.container.find('.swiper-slide img[style]').transition(0).transform('translate3d(0,0,0) scale(1)');
this.lastScale = 1;
this.currentScale = 1;
this.imageLastTransform = { x: 0, y: 0 };
this.imageTransform = { x: 0, y: 0 };
this.imageDiff = { x: 0, y: 0 };
this.imageLastDiff = { x: 0, y: 0 };
if(this.config.onSlideChange) {
this.config.onSlideChange.call(this, this.activeIndex);
}
}, this));
},
slideNext: function() {
return this.slideTo(this.activeIndex+1, 200);
},
slidePrev: function() {
return this.slideTo(this.activeIndex-1, 200);
}
}
defaults = Photos.prototype.defaults = {
items: [],
    autoOpen: false, // open immediately after initialization
onOpen: undefined,
onClose: undefined,
    initIndex: 0, // index of the photo shown when first opened
maxScale: 3,
onSlideChange: undefined,
    duration: 200, // default animation duration, used when the caller does not pass one<|fim▁hole|>
<div class="swiper-wrapper">\
{{#items}}\
<div class="swiper-slide">\
<div class="photo-container">\
<img src="{{image}}" />\
</div>\
</div>\
{{/items}}\
</div>\
<div class="caption">\
{{#items}}\
<div class="caption-item caption-item-{{@index}}">{{caption}}</div>\
{{/items}}\
</div>\
<div class="swiper-pagination swiper-pagination-bullets">\
{{#items}}\
<span class="swiper-pagination-bullet"></span>\
{{/items}}\
</div>\
</div>\
</div>'
}
$.photoBrowser = function(params) {
return new Photos(params);
}
}($);<|fim▁end|> | tpl: '<div class="weui-photo-browser-modal">\ |
<|file_name|>cvs.py<|end_file_name|><|fim▁begin|># CVS conversion code inspired by hg-cvs-import and git-cvsimport
import os, locale, re, socket
from cStringIO import StringIO
from mercurial import util
from common import NoRepo, commit, converter_source, checktool
class convert_cvs(converter_source):
def __init__(self, ui, path, rev=None):
super(convert_cvs, self).__init__(ui, path, rev=rev)
cvs = os.path.join(path, "CVS")
if not os.path.exists(cvs):
raise NoRepo("%s does not look like a CVS checkout" % path)
self.cmd = ui.config('convert', 'cvsps', 'cvsps -A -u --cvs-direct -q')
cvspsexe = self.cmd.split(None, 1)[0]
for tool in (cvspsexe, 'cvs'):
checktool(tool)
self.changeset = {}
self.files = {}
self.tags = {}
self.lastbranch = {}
self.parent = {}
self.socket = None
self.cvsroot = file(os.path.join(cvs, "Root")).read()[:-1]
self.cvsrepo = file(os.path.join(cvs, "Repository")).read()[:-1]
self.encoding = locale.getpreferredencoding()
self._parse()
self._connect()
def _parse(self):
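        # Parse `cvsps` output with a small state machine:
        # state 0 = patchset header, 1 = log message, 2 = member files, 3 = skip (past maxrev).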
if self.changeset:
return
maxrev = 0
cmd = self.cmd
if self.rev:
# TODO: handle tags
try:
# patchset number?
maxrev = int(self.rev)
except ValueError:
try:
# date
util.parsedate(self.rev, ['%Y/%m/%d %H:%M:%S'])
cmd = '%s -d "1970/01/01 00:00:01" -d "%s"' % (cmd, self.rev)
except util.Abort:
raise util.Abort('revision %s is not a patchset number or date' % self.rev)
d = os.getcwd()
try:
os.chdir(self.path)
id = None
state = 0
filerevids = {}
for l in util.popen(cmd):
if state == 0: # header
if l.startswith("PatchSet"):
id = l[9:-2]
if maxrev and int(id) > maxrev:
# ignore everything
state = 3
elif l.startswith("Date"):
date = util.parsedate(l[6:-1], ["%Y/%m/%d %H:%M:%S"])
date = util.datestr(date)
elif l.startswith("Branch"):
branch = l[8:-1]
self.parent[id] = self.lastbranch.get(branch, 'bad')
self.lastbranch[branch] = id
elif l.startswith("Ancestor branch"):
ancestor = l[17:-1]
# figure out the parent later
self.parent[id] = self.lastbranch[ancestor]
elif l.startswith("Author"):
author = self.recode(l[8:-1])
elif l.startswith("Tag:") or l.startswith("Tags:"):
t = l[l.index(':')+1:]
t = [ut.strip() for ut in t.split(',')]
if (len(t) > 1) or (t[0] and (t[0] != "(none)")):
self.tags.update(dict.fromkeys(t, id))
elif l.startswith("Log:"):
# switch to gathering log
state = 1
log = ""
elif state == 1: # log
if l == "Members: \n":
# switch to gathering members
files = {}
oldrevs = []
log = self.recode(log[:-1])
state = 2
else:
# gather log
log += l
elif state == 2: # members
if l == "\n": # start of next entry
state = 0
p = [self.parent[id]]
if id == "1":
p = []
if branch == "HEAD":
branch = ""
if branch:
latest = None
# the last changeset that contains a base
# file is our parent
for r in oldrevs:
latest = max(filerevids.get(r, None), latest)
if latest:
p = [latest]
# add current commit to set
c = commit(author=author, date=date, parents=p,
desc=log, branch=branch)
self.changeset[id] = c
self.files[id] = files
else:
colon = l.rfind(':')
file = l[1:colon]
rev = l[colon+1:-2]
oldrev, rev = rev.split("->")
files[file] = rev
# save some information for identifying branch points
oldrevs.append("%s:%s" % (oldrev, file))
filerevids["%s:%s" % (rev, file)] = id
elif state == 3:
# swallow all input
continue
self.heads = self.lastbranch.values()
finally:
os.chdir(d)
def _connect(self):
root = self.cvsroot
conntype = None
user, host = None, None
cmd = ['cvs', 'server']
self.ui.status("connecting to %s\n" % root)
if root.startswith(":pserver:"):
root = root[9:]
m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)',
root)
if m:
conntype = "pserver"
user, passw, serv, port, root = m.groups()
if not user:
user = "anonymous"
if not port:
port = 2401
else:
port = int(port)
format0 = ":pserver:%s@%s:%s" % (user, serv, root)
format1 = ":pserver:%s@%s:%d%s" % (user, serv, port, root)
if not passw:
passw = "A"
pf = open(os.path.join(os.environ["HOME"], ".cvspass"))
for line in pf.read().splitlines():
part1, part2 = line.split(' ', 1)
if part1 == '/1':
# /1 :pserver:[email protected]:2401/cvsroot/foo Ah<Z
part1, part2 = part2.split(' ', 1)
format = format1
else:
# :pserver:[email protected]:/cvsroot/foo Ah<Z
format = format0
if part1 == format:
passw = part2
break
pf.close()
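                # Authenticate with the pserver handshake, using the (scrambled)
                # password looked up in ~/.cvspass above.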
sck = socket.socket()
sck.connect((serv, port))
sck.send("\n".join(["BEGIN AUTH REQUEST", root, user, passw,
"END AUTH REQUEST", ""]))
if sck.recv(128) != "I LOVE YOU\n":
raise util.Abort("CVS pserver authentication failed")
self.writep = self.readp = sck.makefile('r+')
if not conntype and root.startswith(":local:"):
conntype = "local"
root = root[7:]
if not conntype:
# :ext:user@host/home/user/path/to/cvsroot
if root.startswith(":ext:"):
root = root[5:]
m = re.match(r'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
            # Do not mistake a Windows path "c:\foo\bar" for a connection string
if os.path.isdir(root) or not m:
conntype = "local"
else:
conntype = "rsh"
user, host, root = m.group(1), m.group(2), m.group(3)<|fim▁hole|>
if conntype != "pserver":
if conntype == "rsh":
rsh = os.environ.get("CVS_RSH") or "ssh"
if user:
cmd = [rsh, '-l', user, host] + cmd
else:
cmd = [rsh, host] + cmd
# popen2 does not support argument lists under Windows
cmd = [util.shellquote(arg) for arg in cmd]
cmd = util.quotecommand(' '.join(cmd))
self.writep, self.readp = os.popen2(cmd, 'b')
self.realroot = root
self.writep.write("Root %s\n" % root)
self.writep.write("Valid-responses ok error Valid-requests Mode"
" M Mbinary E Checked-in Created Updated"
" Merged Removed\n")
self.writep.write("valid-requests\n")
self.writep.flush()
r = self.readp.readline()
if not r.startswith("Valid-requests"):
raise util.Abort("server sucks")
if "UseUnchanged" in r:
self.writep.write("UseUnchanged\n")
self.writep.flush()
r = self.readp.readline()
def getheads(self):
return self.heads
def _getfile(self, name, rev):
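        # Check out a single file revision via the CVS client/server protocol,
        # returning its contents and mode string.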
def chunkedread(fp, count):
            # file-objects returned by socket.makefile() do not handle
# large read() requests very well.
chunksize = 65536
output = StringIO()
while count > 0:
data = fp.read(min(count, chunksize))
if not data:
raise util.Abort("%d bytes missing from remote file" % count)
count -= len(data)
output.write(data)
return output.getvalue()
if rev.endswith("(DEAD)"):
raise IOError
args = ("-N -P -kk -r %s --" % rev).split()
args.append(self.cvsrepo + '/' + name)
for x in args:
self.writep.write("Argument %s\n" % x)
self.writep.write("Directory .\n%s\nco\n" % self.realroot)
self.writep.flush()
data = ""
while 1:
line = self.readp.readline()
if line.startswith("Created ") or line.startswith("Updated "):
self.readp.readline() # path
self.readp.readline() # entries
mode = self.readp.readline()[:-1]
count = int(self.readp.readline()[:-1])
data = chunkedread(self.readp, count)
elif line.startswith(" "):
data += line[1:]
elif line.startswith("M "):
pass
elif line.startswith("Mbinary "):
count = int(self.readp.readline()[:-1])
data = chunkedread(self.readp, count)
else:
if line == "ok\n":
return (data, "x" in mode and "x" or "")
elif line.startswith("E "):
self.ui.warn("cvs server: %s\n" % line[2:])
elif line.startswith("Remove"):
l = self.readp.readline()
l = self.readp.readline()
if l != "ok\n":
raise util.Abort("unknown CVS response: %s" % l)
else:
raise util.Abort("unknown CVS response: %s" % line)
def getfile(self, file, rev):
data, mode = self._getfile(file, rev)
self.modecache[(file, rev)] = mode
return data
def getmode(self, file, rev):
return self.modecache[(file, rev)]
def getchanges(self, rev):
self.modecache = {}
files = self.files[rev]
cl = files.items()
cl.sort()
return (cl, {})
def getcommit(self, rev):
return self.changeset[rev]
def gettags(self):
return self.tags
def getchangedfiles(self, rev, i):
files = self.files[rev].keys()
files.sort()
return files<|fim▁end|> | |
<|file_name|>parse_shebang_test.py<|end_file_name|><|fim▁begin|>from __future__ import annotations
import contextlib
import os.path
import shutil
import sys
import pytest
from pre_commit import parse_shebang
from pre_commit.envcontext import envcontext
from pre_commit.envcontext import Var
from pre_commit.util import make_executable
def _echo_exe() -> str:
exe = shutil.which('echo')
assert exe is not None
return exe
def test_file_doesnt_exist():
assert parse_shebang.parse_filename('herp derp derp') == ()
def test_simple_case(tmpdir):
x = tmpdir.join('f')
x.write('#!/usr/bin/env echo')
make_executable(x.strpath)
assert parse_shebang.parse_filename(x.strpath) == ('echo',)
def test_find_executable_full_path():
assert parse_shebang.find_executable(sys.executable) == sys.executable
def test_find_executable_on_path():
assert parse_shebang.find_executable('echo') == _echo_exe()
def test_find_executable_not_found_none():
assert parse_shebang.find_executable('not-a-real-executable') is None
def write_executable(shebang, filename='run'):
os.mkdir('bin')
path = os.path.join('bin', filename)
with open(path, 'w') as f:
f.write(f'#!{shebang}')
make_executable(path)
return path
@contextlib.contextmanager
def bin_on_path():
bindir = os.path.join(os.getcwd(), 'bin')
with envcontext((('PATH', (bindir, os.pathsep, Var('PATH'))),)):
yield
def test_find_executable_path_added(in_tmpdir):
path = os.path.abspath(write_executable('/usr/bin/env sh'))
assert parse_shebang.find_executable('run') is None
with bin_on_path():
assert parse_shebang.find_executable('run') == path
def test_find_executable_path_ext(in_tmpdir):
"""Windows exports PATHEXT as a list of extensions to automatically add
to executables when doing PATH searching.
"""
exe_path = os.path.abspath(
write_executable('/usr/bin/env sh', filename='run.myext'),
)
env_path = {'PATH': os.path.dirname(exe_path)}
env_path_ext = dict(env_path, PATHEXT=os.pathsep.join(('.exe', '.myext')))
assert parse_shebang.find_executable('run') is None
assert parse_shebang.find_executable('run', _environ=env_path) is None
ret = parse_shebang.find_executable('run.myext', _environ=env_path)
assert ret == exe_path
ret = parse_shebang.find_executable('run', _environ=env_path_ext)
assert ret == exe_path
def test_normexe_does_not_exist():
with pytest.raises(OSError) as excinfo:
parse_shebang.normexe('i-dont-exist-lol')
assert excinfo.value.args == ('Executable `i-dont-exist-lol` not found',)
def test_normexe_does_not_exist_sep():
with pytest.raises(OSError) as excinfo:
parse_shebang.normexe('./i-dont-exist-lol')
assert excinfo.value.args == ('Executable `./i-dont-exist-lol` not found',)
@pytest.mark.xfail(os.name == 'nt', reason='posix only')
def test_normexe_not_executable(tmpdir): # pragma: win32 no cover
tmpdir.join('exe').ensure()
with tmpdir.as_cwd(), pytest.raises(OSError) as excinfo:
parse_shebang.normexe('./exe')
assert excinfo.value.args == ('Executable `./exe` is not executable',)
def test_normexe_is_a_directory(tmpdir):
with tmpdir.as_cwd():
tmpdir.join('exe').ensure_dir()
exe = os.path.join('.', 'exe')
with pytest.raises(OSError) as excinfo:
parse_shebang.normexe(exe)
msg, = excinfo.value.args
assert msg == f'Executable `{exe}` is a directory'
def test_normexe_already_full_path():
assert parse_shebang.normexe(sys.executable) == sys.executable
def test_normexe_gives_full_path():
assert parse_shebang.normexe('echo') == _echo_exe()
assert os.sep in _echo_exe()
def test_normalize_cmd_trivial():
cmd = (_echo_exe(), 'hi')
assert parse_shebang.normalize_cmd(cmd) == cmd<|fim▁hole|>
def test_normalize_cmd_PATH():
cmd = ('echo', '--version')
expected = (_echo_exe(), '--version')
assert parse_shebang.normalize_cmd(cmd) == expected
def test_normalize_cmd_shebang(in_tmpdir):
echo = _echo_exe().replace(os.sep, '/')
path = write_executable(echo)
assert parse_shebang.normalize_cmd((path,)) == (echo, path)
def test_normalize_cmd_PATH_shebang_full_path(in_tmpdir):
echo = _echo_exe().replace(os.sep, '/')
path = write_executable(echo)
with bin_on_path():
ret = parse_shebang.normalize_cmd(('run',))
assert ret == (echo, os.path.abspath(path))
def test_normalize_cmd_PATH_shebang_PATH(in_tmpdir):
echo = _echo_exe()
path = write_executable('/usr/bin/env echo')
with bin_on_path():
ret = parse_shebang.normalize_cmd(('run',))
assert ret == (echo, os.path.abspath(path))<|fim▁end|> | |
<|file_name|>node_identifier_test.rs<|end_file_name|><|fim▁begin|>/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use common::FileKey;
use graphql_ir::{build, ExecutableDefinition, Selection};
use graphql_syntax::parse;
use graphql_transforms::NodeIdentifier;
use test_schema::TEST_SCHEMA;
fn get_selection(def: &ExecutableDefinition) -> &Selection {
if let ExecutableDefinition::Fragment(frag) = def {
&frag.selections[0]
} else {
panic!("No selection found in: {:#?}", &def);
}
}
fn are_selections_equal(graphql: &str) -> bool {
let file_key = FileKey::new("test");
let ast = parse(graphql, file_key).unwrap();
let ir = build(&TEST_SCHEMA, &ast.definitions).unwrap();
let left = NodeIdentifier::from_selection(&TEST_SCHEMA, get_selection(&ir[0]));
let right = NodeIdentifier::from_selection(&TEST_SCHEMA, get_selection(&ir[1]));
left == right
}
#[test]
fn test_fields() {
assert!(are_selections_equal(
r#"
fragment Left on User {
name @customDirective(level: 1)
}
fragment Right on User {
name @customDirective(level: 1)
}
"#
));
assert!(!are_selections_equal(
r#"
fragment Left on User {
name @customDirective(level: 1)
}
fragment Right on User {
name @customDirective(level: 2)
}
"#
));
assert!(are_selections_equal(
// When there is an alias, we only compare the alias instead of the original name
r#"
fragment Left on User {
name: username @customDirective(level: 1)
}
fragment Right on User {
name @customDirective(level: 1)
}
"#
));
assert!(are_selections_equal(
// We don't compare arguments for field identities
r#"
fragment Left on User {
firstName(if: true, unless: false)
}
fragment Right on User {
firstName(if: false, unless: true)
}
"#
));
assert!(are_selections_equal(
r#"
fragment Left on User {
zucktown: hometown @customDirective(level: 1){
id
lastName
}
}
fragment Right on User {
zucktown: hometown @customDirective(level: 1){
id
lastName
}
}
"#
));
}
#[test]
fn test_inline_fragments() {
assert!(are_selections_equal(
r#"
fragment Left on Actor {
... on User {
name
}
}
fragment Right on User {
... on User {
name
}
}
"#
));
assert!(!are_selections_equal(
r#"
fragment Left on Actor {
... on Actor {
name
}
}
fragment Right on Actor {
... on User {
name
}
}
"#
));
assert!(!are_selections_equal(
// We compare directives for inline fragments in Rust
r#"
fragment Left on Actor {
... on User @defer(label: "Zuck") {
name
}
}
fragment Right on User {
... on User @defer(label: "Mark") {
name
}
}
"#
));
}
#[test]
fn test_fragment_spreads() {
assert!(are_selections_equal(
r#"
fragment Left on User {
...CommonFragment
}
fragment Right on User {
...CommonFragment
}
fragment CommonFragment on User {
name
}
"#
));
assert!(!are_selections_equal(
r#"
fragment Left on User {
...SpreadLeft
}
fragment Right on User {
...Left
}
fragment SpreadLeft on User {
name
}
"#
));
assert!(are_selections_equal(
r#"
fragment Left on User {
...CommonFragment @arguments(pictureSize: [42])
}
fragment Right on User {
...CommonFragment @arguments(pictureSize: [42])
}
fragment CommonFragment on User
@argumentDefinitions(pictureSize: {type: "[Int]"}) {
profilePicture(size: $pictureSize) {
uri
}
}
"#
));
assert!(!are_selections_equal(
// Fragment spread with different arguments are not equal
r#"
fragment Left on User {
...CommonFragment @arguments(pictureSize: [0])
}
fragment Right on User {
...CommonFragment @arguments(pictureSize: [42])
}<|fim▁hole|> fragment CommonFragment on User
@argumentDefinitions(pictureSize: {type: "[Int]"}) {
profilePicture(size: $pictureSize) {
uri
}
}
"#
));
}
#[test]
fn test_conditions() {
assert!(are_selections_equal(
r#"
fragment Left on User {
...CommonFragment @include(if: $conditional1) @skip(if: $conditional3)
}
fragment Right on User {
...CommonFragment @include(if: $conditional1) @skip(if: $conditional3)
}
fragment CommonFragment on User {
name
}
"#
));
assert!(!are_selections_equal(
r#"
fragment Left on User {
...CommonFragment @include(if: $conditional1)
}
fragment Right on User {
...CommonFragment @include(if: $conditional2)
}
fragment CommonFragment on User {
name
}
"#
));
}<|fim▁end|> | |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>#![feature(nll)]
#![feature(native_link_modifiers)]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
// NOTE: This crate only exists to allow linking on mingw targets.
use libc::{c_char, size_t};
use std::cell::RefCell;
use std::slice;
#[repr(C)]
pub struct RustString {
pub bytes: RefCell<Vec<u8>>,
}
impl RustString {
pub fn len(&self) -> usize {
self.bytes.borrow().len()
}
pub fn is_empty(&self) -> bool {
self.bytes.borrow().is_empty()
}
}
/// Appending to a Rust string -- used by RawRustStringOstream.
#[no_mangle]
pub unsafe extern "C" fn LLVMRustStringWriteImpl(
sr: &RustString,
ptr: *const c_char,
size: size_t,
) {
let slice = slice::from_raw_parts(ptr as *const u8, size as usize);
sr.bytes.borrow_mut().extend_from_slice(slice);
}
/// Initialize targets enabled by the build script via `cfg(llvm_component = "...")`.
/// N.B., this function can't be moved to `rustc_codegen_llvm` because of the `cfg`s.
pub fn initialize_available_targets() {
macro_rules! init_target(
($cfg:meta, $($method:ident),*) => { {
#[cfg($cfg)]
fn init() {
extern "C" {
$(fn $method();)*
}
unsafe {
$($method();)*
}
}
#[cfg(not($cfg))]
fn init() { }
init();
} }
);
init_target!(
llvm_component = "x86",
LLVMInitializeX86TargetInfo,
LLVMInitializeX86Target,
LLVMInitializeX86TargetMC,
LLVMInitializeX86AsmPrinter,<|fim▁hole|> LLVMInitializeARMTargetInfo,
LLVMInitializeARMTarget,
LLVMInitializeARMTargetMC,
LLVMInitializeARMAsmPrinter,
LLVMInitializeARMAsmParser
);
init_target!(
llvm_component = "aarch64",
LLVMInitializeAArch64TargetInfo,
LLVMInitializeAArch64Target,
LLVMInitializeAArch64TargetMC,
LLVMInitializeAArch64AsmPrinter,
LLVMInitializeAArch64AsmParser
);
init_target!(
llvm_component = "amdgpu",
LLVMInitializeAMDGPUTargetInfo,
LLVMInitializeAMDGPUTarget,
LLVMInitializeAMDGPUTargetMC,
LLVMInitializeAMDGPUAsmPrinter,
LLVMInitializeAMDGPUAsmParser
);
init_target!(
llvm_component = "avr",
LLVMInitializeAVRTargetInfo,
LLVMInitializeAVRTarget,
LLVMInitializeAVRTargetMC,
LLVMInitializeAVRAsmPrinter,
LLVMInitializeAVRAsmParser
);
init_target!(
llvm_component = "m68k",
LLVMInitializeM68kTargetInfo,
LLVMInitializeM68kTarget,
LLVMInitializeM68kTargetMC,
LLVMInitializeM68kAsmPrinter,
LLVMInitializeM68kAsmParser
);
init_target!(
llvm_component = "mips",
LLVMInitializeMipsTargetInfo,
LLVMInitializeMipsTarget,
LLVMInitializeMipsTargetMC,
LLVMInitializeMipsAsmPrinter,
LLVMInitializeMipsAsmParser
);
init_target!(
llvm_component = "powerpc",
LLVMInitializePowerPCTargetInfo,
LLVMInitializePowerPCTarget,
LLVMInitializePowerPCTargetMC,
LLVMInitializePowerPCAsmPrinter,
LLVMInitializePowerPCAsmParser
);
init_target!(
llvm_component = "systemz",
LLVMInitializeSystemZTargetInfo,
LLVMInitializeSystemZTarget,
LLVMInitializeSystemZTargetMC,
LLVMInitializeSystemZAsmPrinter,
LLVMInitializeSystemZAsmParser
);
init_target!(
llvm_component = "jsbackend",
LLVMInitializeJSBackendTargetInfo,
LLVMInitializeJSBackendTarget,
LLVMInitializeJSBackendTargetMC
);
init_target!(
llvm_component = "msp430",
LLVMInitializeMSP430TargetInfo,
LLVMInitializeMSP430Target,
LLVMInitializeMSP430TargetMC,
LLVMInitializeMSP430AsmPrinter,
LLVMInitializeMSP430AsmParser
);
init_target!(
llvm_component = "riscv",
LLVMInitializeRISCVTargetInfo,
LLVMInitializeRISCVTarget,
LLVMInitializeRISCVTargetMC,
LLVMInitializeRISCVAsmPrinter,
LLVMInitializeRISCVAsmParser
);
init_target!(
llvm_component = "sparc",
LLVMInitializeSparcTargetInfo,
LLVMInitializeSparcTarget,
LLVMInitializeSparcTargetMC,
LLVMInitializeSparcAsmPrinter,
LLVMInitializeSparcAsmParser
);
init_target!(
llvm_component = "nvptx",
LLVMInitializeNVPTXTargetInfo,
LLVMInitializeNVPTXTarget,
LLVMInitializeNVPTXTargetMC,
LLVMInitializeNVPTXAsmPrinter
);
init_target!(
llvm_component = "hexagon",
LLVMInitializeHexagonTargetInfo,
LLVMInitializeHexagonTarget,
LLVMInitializeHexagonTargetMC,
LLVMInitializeHexagonAsmPrinter,
LLVMInitializeHexagonAsmParser
);
init_target!(
llvm_component = "webassembly",
LLVMInitializeWebAssemblyTargetInfo,
LLVMInitializeWebAssemblyTarget,
LLVMInitializeWebAssemblyTargetMC,
LLVMInitializeWebAssemblyAsmPrinter,
LLVMInitializeWebAssemblyAsmParser
);
init_target!(
llvm_component = "bpf",
LLVMInitializeBPFTargetInfo,
LLVMInitializeBPFTarget,
LLVMInitializeBPFTargetMC,
LLVMInitializeBPFAsmPrinter,
LLVMInitializeBPFAsmParser
);
}<|fim▁end|> | LLVMInitializeX86AsmParser
);
init_target!(
llvm_component = "arm", |
<|file_name|>array.d.ts<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license<|fim▁hole|><|fim▁end|> | */
/** Wraps the provided value in an array, unless the provided value is an array. */
export declare function coerceArray<T>(value: T | T[]): T[]; |
<|file_name|>oneview.py<|end_file_name|><|fim▁begin|># Copyright 2016 Intel Corporation
# Copyright 2015 Hewlett Packard Development Company, LP
# Copyright 2015 Universidade Federal de Campina Grande
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from ironic.common.i18n import _
opts = [
cfg.StrOpt('manager_url',
help=_('URL where OneView is available.')),
cfg.StrOpt('username',
help=_('OneView username to be used.')),
cfg.StrOpt('password',
secret=True,
help=_('OneView password to be used.')),
cfg.BoolOpt('allow_insecure_connections',
default=False,
help=_('Option to allow insecure connection with OneView.')),
cfg.StrOpt('tls_cacert_file',
help=_('Path to CA certificate.')),
cfg.IntOpt('max_polling_attempts',
default=12,
help=_('Max connection retries to check changes on OneView.')),
cfg.BoolOpt('enable_periodic_tasks',
default=True,
                help=_('Whether to enable the periodic tasks for the OneView '
                       'driver to be aware when OneView hardware resources are '
                       'taken and released by Ironic or OneView users '
                       'and to proactively manage nodes in clean failed state '
'according to Dynamic Allocation model of hardware '<|fim▁hole|> cfg.IntOpt('periodic_check_interval',
default=300,
help=_('Period (in seconds) for periodic tasks to be '
'executed when enable_periodic_tasks=True.')),
]
def register_opts(conf):
conf.register_opts(opts, group='oneview')<|fim▁end|> | 'resources allocation in OneView.')), |
<|file_name|>Argonauta.java<|end_file_name|><|fim▁begin|>/*
* ISABEL: A group collaboration tool for the Internet
* Copyright (C) 2011 Agora System S.A.
*
* This file is part of Isabel.
*
* Isabel is free software: you can redistribute it and/or modify
* it under the terms of the Affero GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Isabel is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* Affero GNU General Public License for more details.
*
* You should have received a copy of the Affero GNU General Public License
* along with Isabel. If not, see <http://www.gnu.org/licenses/>.
*/
package isabel.argonauta;
import javax.swing.*;
import javax.swing.filechooser.*;
import java.awt.*;
import java.awt.event.*;
/**
* File browser.
*
 * Selected documents are opened with xdg-open
*/
public class Argonauta {
JFileChooser fc;
/**
* main method.
*/
public static void main(String[] args) {
Argonauta demo = new Argonauta();
}
/**
* FileChooserDemo Constructor
*/
public Argonauta() {
fc = new JFileChooser();
fc.setDragEnabled(false);
// set the current directory:
// fc.setCurrentDirectory(swingFile);
// Add file filters:
javax.swing.filechooser.FileFilter filter;
filter = new FileNameExtensionFilter("Images", "jpg", "jpeg", "png",
"gif");
fc.addChoosableFileFilter(filter);
filter = new FileNameExtensionFilter("PDF", "PDF");
fc.addChoosableFileFilter(filter);
<|fim▁hole|>
fc.setAcceptAllFileFilterUsed(true);
// remove the approve/cancel buttons
fc.setControlButtonsAreShown(false);
// Actions & Listener:
Action openAction = createOpenAction();
Action dismissAction = createDismissAction();
fc.addActionListener(openAction);
// make custom controls
JPanel buttons = new JPanel();
buttons.add(new JButton(dismissAction));
buttons.add(new JButton(openAction));
// Main Window:
JFrame jf = new JFrame("File browser");
jf.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
jf.getContentPane().add(fc, BorderLayout.CENTER);
jf.getContentPane().add(buttons, BorderLayout.SOUTH);
jf.pack();
jf.setVisible(true);
}
public Action createOpenAction() {
return new AbstractAction("Open") {
public void actionPerformed(ActionEvent e) {
if (!e.getActionCommand().equals(JFileChooser.CANCEL_SELECTION)
&& fc.getSelectedFile() != null) {
openDocument(fc.getSelectedFile().getPath());
}
}
};
}
public Action createDismissAction() {
return new AbstractAction("Dismiss") {
public void actionPerformed(ActionEvent e) {
System.exit(0);
}
};
}
private void openDocument(String name) {
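        // Hand the file off to the desktop's default application via xdg-open.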
try {
Runtime rt = Runtime.getRuntime();
String[] command = { "xdg-open", name };
rt.exec(command);
} catch (Exception e) {
System.err.println("I can not open this document: " + name);
}
}
}<|fim▁end|> | filter = new FileNameExtensionFilter("Office", "ppt", "pptx", "doc",
"docx");
fc.addChoosableFileFilter(filter); |
<|file_name|>test.py<|end_file_name|><|fim▁begin|>import numpy as np
import cv2
from sys import argv
class Test:
def __init__(self, name, image):
self.image = image
self.name = name
self.list = []
def add(self, function):
self.list.append(function)
def run(self):
cv2.imshow(self.name, self.image)
for function in self.list:
<|fim▁hole|>
def grayscale(image):
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
return image
def median(image):
cv2.medianBlur(image, 9, image)
return image
def unsharp(image):
image2 = cv2.GaussianBlur(image, (21,21), 21)
    image = cv2.addWeighted(image, 1.5, image2, -0.5, 0, image)
return image
def harris(image):
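    # Find strong corners in the left and right thirds of the image and mark
    # the mean corner position of each region with a small circle.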
    x33 = image.shape[1] // 3
    x66 = image.shape[1] // 3 * 2
dst1 = cv2.goodFeaturesToTrack(image[:,:x33], 10, 0.1, 5)
mean1 = np.uint8(cv2.mean(dst1))
cv2.circle(image, (mean1[0], mean1[1]), 2, 255)
dst2 = cv2.goodFeaturesToTrack(image[:,x66:], 10, 0.1, 5)
dst2 += [x66, 0]
mean2 = np.uint8(cv2.mean(dst2))
cv2.circle(image, (mean2[0], mean2[1]), 2, 255)
return image
if __name__ == '__main__':
image = cv2.imread(argv[1])
test = Test('Test', image)
test.add(grayscale)
test.add(median)
test.add(harris)
test.run()<|fim▁end|> | cv2.waitKey()
self.image = function(self.image)
cv2.imshow(self.name, self.image)
cv2.waitKey()
|
<|file_name|>_createAggregator.js<|end_file_name|><|fim▁begin|>var arrayAggregator = require('./_arrayAggregator'),
baseAggregator = require('./_baseAggregator'),
baseIteratee = require('./_baseIteratee'),
isArray = require('./isArray');
/**
* Creates a function like `_.groupBy`.
*
* @private
* @param {Function} setter The function to set accumulator values.
* @param {Function} [initializer] The accumulator object initializer.
* @returns {Function} Returns the new aggregator function.
*/
function createAggregator(setter, initializer) {
<|fim▁hole|>
return func(collection, setter, baseIteratee(iteratee, 2), accumulator);
};
}
module.exports = createAggregator;<|fim▁end|> | return function(collection, iteratee) {
var func = isArray(collection) ? arrayAggregator : baseAggregator,
accumulator = initializer ? initializer() : {};
|
<|file_name|>dates.js<|end_file_name|><|fim▁begin|>/**
* @arliteam/arli v0.2.1
* https://github.com/arliteam/arli
*
* Copyright (c) Mohamed Elkebir (https://getsupercode.com)
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
"use strict";
exports.__esModule = true;
/**
* Test if a date string is in latin DMY format.
*
* Date format: DD/MM/YY[YY] DD.MM.YY[YY] DD-MM-YY[YY] DD MM YY[YY]
*
* https://en.wikipedia.org/wiki/Date_format_by_country
*
* @example
* 30/12/2000 | 30/12/99
* 30-12-2000 | 30-12-99
* 30.12.2000 | 30.12.99<|fim▁hole|> * @param date A string of date to be tested
*/
exports.isDateDMY = function (date) {
var pattern = /^(31|30|(?:0[1-9]|[1-2][0-9]))(\/|\.|-| )(12|11|10|0[1-9])(\2)(\d{4}|\d{2})$/;
return pattern.test(date);
};
/**
* Test if a date string is in latin MDY format.
*
* Date format: MM/DD/YY[YY] MM.DD.YY[YY] MM-DD-YY[YY] MM DD YY[YY]
*
* https://en.wikipedia.org/wiki/Date_format_by_country
*
* @example
* 12/30/2000 | 12/30/99
* 12-30-2000 | 12-30-99
* 12.30.2000 | 12.30.99
* 12 30 2000 | 12 30 99
*
* @param date A string of date to be tested
*/
exports.isDateMDY = function (date) {
var pattern = /^(12|11|10|0[1-9])(\/|\.|-| )(31|30|(?:0[1-9]|[1-2][0-9]))(\2)(\d{4}|\d{2})$/;
return pattern.test(date);
};
/**
* Test if a date string is in latin YMD format.
*
* Date format: YY[YY]/MM/DD YY[YY].MM.DD YY[YY]-MM-DD YY[YY] MM DD
*
* https://en.wikipedia.org/wiki/Date_format_by_country
*
* @example
* 2000/12/30 | 99/12/30
* 2000-12-30 | 99-12-30
* 2000.12.30 | 99.12.30
* 2000 12 30 | 99 12 30
*
* @param date A string of date to be tested
*/
exports.isDateYMD = function (date) {
var pattern = /^(\d{4}|\d{2})(\/|\.|-| )(12|11|10|0[1-9])(\2)(31|30|(?:0[1-9]|[1-2][0-9]))$/;
return pattern.test(date);
};<|fim▁end|> | * 30 12 2000 | 30 12 99
* |
<|file_name|>shader_source.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>use gfx;
pub static VERTEX_SRC: gfx::ShaderSource = shaders! {
GLSL_150: b"
#version 150 core
in vec3 pos;
out vec2 v_uv;
uniform mat4 model_view_projection;
void main() {
gl_Position = model_view_projection * vec4(pos, 1.0);
}
"
};
pub static FRAGMENT_SRC: gfx::ShaderSource = shaders! {
GLSL_150: b"
#version 150 core
out vec4 out_color;
uniform vec4 color;
void main() {
out_color = color;
}
"
};<|fim▁end|> | |
<|file_name|>GetPrefs.java<|end_file_name|><|fim▁begin|>package com.kensenter.p2poolwidget;
import android.content.Context;
import android.content.SharedPreferences;
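/**
 * Convenience accessors for the widget's SharedPreferences;
 * each widget id gets its own preferences file.
 */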
<|fim▁hole|>public class GetPrefs {
public static final String PREFS_NAME = "p2poolwidgetprefs";
public String GetWidget(Context ctxt, int WidgetId){
SharedPreferences settings = ctxt.getSharedPreferences(PREFS_NAME+WidgetId, 0);
return settings.getString("servername", null);
}
public String GetServer(Context ctxt, int WidgetId){
SharedPreferences settings = ctxt.getSharedPreferences(PREFS_NAME+WidgetId, 0);
return settings.getString("servername", "");
}
public String getPayKey(Context ctxt, int WidgetId){
SharedPreferences settings = ctxt.getSharedPreferences(PREFS_NAME+WidgetId, 0);
return settings.getString("paykey", "");
}
public Integer getPort(Context ctxt, int WidgetId){
SharedPreferences settings = ctxt.getSharedPreferences(PREFS_NAME+WidgetId, 0);
return settings.getInt("portnum", 3332);
}
public Integer getHashLevel(Context ctxt, int WidgetId){
SharedPreferences settings = ctxt.getSharedPreferences(PREFS_NAME+WidgetId, 0);
return settings.getInt("hashlevel", 2);
}
public Integer getAlertRate(Context ctxt, int WidgetId){
SharedPreferences settings = ctxt.getSharedPreferences(PREFS_NAME+WidgetId, 0);
return settings.getInt("alertnum", 0);
}
public Integer getDOARate(Context ctxt, int WidgetId){
SharedPreferences settings = ctxt.getSharedPreferences(PREFS_NAME+WidgetId, 0);
return settings.getInt("doanum", 50);
}
public String getEfficiency(Context ctxt, int WidgetId){
SharedPreferences settings = ctxt.getSharedPreferences(PREFS_NAME+WidgetId, 0);
return settings.getString("efficiency", "");
}
public String getUptime(Context ctxt, int WidgetId){
SharedPreferences settings = ctxt.getSharedPreferences(PREFS_NAME+WidgetId, 0);
return settings.getString("uptime", "");
}
public String getShares(Context ctxt, int WidgetId){
SharedPreferences settings = ctxt.getSharedPreferences(PREFS_NAME+WidgetId, 0);
return settings.getString("shares", "");
}
public String getTimeToShare(Context ctxt, int WidgetId){
SharedPreferences settings = ctxt.getSharedPreferences(PREFS_NAME+WidgetId, 0);
return settings.getString("toshare", "");
}
public String getRoundTime(Context ctxt, int WidgetId){
SharedPreferences settings = ctxt.getSharedPreferences(PREFS_NAME+WidgetId, 0);
return settings.getString("roundtime", "");
}
public String getTimeToBlock(Context ctxt, int WidgetId){
SharedPreferences settings = ctxt.getSharedPreferences(PREFS_NAME+WidgetId, 0);
return settings.getString("toblock", "");
}
public String getBlockValue(Context ctxt, int WidgetId){
SharedPreferences settings = ctxt.getSharedPreferences(PREFS_NAME+WidgetId, 0);
return settings.getString("blockvalue", "");
}
public String getPoolRate(Context ctxt, int WidgetId){
SharedPreferences settings = ctxt.getSharedPreferences(PREFS_NAME+WidgetId, 0);
return settings.getString("pool_rate", "");
}
public boolean getRemoveLine(Context ctxt, int WidgetId){
SharedPreferences settings = ctxt.getSharedPreferences(PREFS_NAME+WidgetId, 0);
return settings.getBoolean("removeline", false);
}
public boolean getAlertOn(Context ctxt, int WidgetId){
SharedPreferences settings = ctxt.getSharedPreferences(PREFS_NAME+WidgetId, 0);
return settings.getBoolean("alerton", true);
}
public boolean getDOAOn(Context ctxt, int WidgetId){
SharedPreferences settings = ctxt.getSharedPreferences(PREFS_NAME+WidgetId, 0);
return settings.getBoolean("doaon", true);
}
}<|fim▁end|> | |
<|file_name|>logout-test.js<|end_file_name|><|fim▁begin|>import Ember from "ember";
import { module, test } from 'qunit';
import startApp from '../helpers/start-app';
import { authenticateSession } from 'code-corps-ember/tests/helpers/ember-simple-auth';
import indexPage from '../pages/index';
<|fim▁hole|>
module('Acceptance: Logout', {
beforeEach: function() {
application = startApp();
},
afterEach: function() {
Ember.run(application, 'destroy');
}
});
test("Logging out", function(assert) {
assert.expect(2);
let user = server.create('user');
authenticateSession(application, { user_id: user.id });
indexPage.visit();
andThen(function() {
assert.equal(indexPage.navMenu.userMenu.logOut.text, "Log out", "Page contains logout link");
indexPage.navMenu.userMenu.logOut.click();
});
andThen(function() {
assert.equal(indexPage.navMenu.logIn.text, "Sign in", "Page contains login link");
});
});<|fim▁end|> | let application; |
<|file_name|>convert_string_literal.rs<|end_file_name|><|fim▁begin|>use rstest::*;
use std::net::SocketAddr;
#[rstest]
#[case(true, "1.2.3.4:42")]
#[case(true, r#"4.3.2.1:24"#)]
#[case(false, "[2001:db8:85a3:8d3:1319:8a2e:370:7348]:443")]
#[case(false, r#"[2aa1:db8:85a3:8af:1319:8a2e:375:4873]:344"#)]
#[case(false, "this.is.not.a.socket.address")]
#[case(false, r#"this.is.not.a.socket.address"#)]
fn cases(#[case] expected: bool, #[case] addr: SocketAddr) {
assert_eq!(expected, addr.is_ipv4());
}
#[rstest]
fn values(
#[values(
"1.2.3.4:42",
r#"4.3.2.1:24"#,
"this.is.not.a.socket.address",
r#"this.is.not.a.socket.address"#
)]
addr: SocketAddr,
) {
assert!(addr.is_ipv4())
}
#[rstest]
#[case(b"12345")]
fn not_convert_byte_array(#[case] cases: &[u8], #[values(b"abc")] values: &[u8]) {
assert_eq!(5, cases.len());
assert_eq!(3, values.len());
}
trait MyTrait {
fn my_trait(&self) -> u32 {
42
}
}
impl MyTrait for &str {}
#[rstest]
#[case("impl", "nothing")]
fn not_convert_impl(#[case] that_impl: impl MyTrait, #[case] s: &str) {
assert_eq!(42, that_impl.my_trait());
assert_eq!(42, s.my_trait());
}
#[rstest]
#[case("1.2.3.4", "1.2.3.4:42")]
#[case("1.2.3.4".to_owned(), "1.2.3.4:42")]
fn not_convert_generics<S: AsRef<str>>(#[case] ip: S, #[case] addr: SocketAddr) {
assert_eq!(addr.ip().to_string(), ip.as_ref());
}
struct MyType(String);
struct E;
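// `E` deliberately lacks a `Debug` impl: rstest converts the string-literal
// cases through `FromStr`, and the conversion must not require `Debug`.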
impl core::str::FromStr for MyType {
type Err = E;<|fim▁hole|> fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"error" => Err(E),
inner => Ok(MyType(inner.to_owned())),
}
}
}
#[rstest]
#[case("hello", "hello")]
#[case("doesn't mater", "error")]
fn convert_without_debug(#[case] expected: &str, #[case] converted: MyType) {
assert_eq!(expected, converted.0);
}<|fim▁end|> | |
<|file_name|>convertingRawFStringQuotes_after.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | s = rf"f<caret>oo{'bar'}" |
<|file_name|>TruffleExceptionTest.java<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* The Universal Permissive License (UPL), Version 1.0
*
* Subject to the condition set forth below, permission is hereby granted to any
* person obtaining a copy of this software, associated documentation and/or
* data (collectively the "Software"), free of charge and under any and all
* copyright rights in the Software, and any and all patent rights owned or
* freely licensable by each licensor hereunder covering either (i) the
* unmodified Software as contributed to or provided by such licensor, or (ii)
* the Larger Works (as defined below), to deal in both
*
* (a) the Software, and
*
* (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
* one is included with the Software each a "Larger Work" to which the Software
* is contributed by such licensors),
*
* without restriction, including without limitation the rights to copy, create
* derivative works of, display, perform, and distribute the Software and make,
* use, sell, offer for sale, import, export, have made, and have sold the
* Software and the Larger Work(s), and to sublicense the foregoing rights on
* either these or other terms.
*
* This license is subject to the following condition:
*
* The above copyright notice and either this complete permission notice or at a
* minimum a reference to the UPL must be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.oracle.truffle.api.test;
import static com.oracle.truffle.api.test.RootNodeTest.verifyStackTraceElementGuestObject;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Queue;
import java.util.Set;
import java.util.function.Consumer;
import java.util.logging.Handler;
import java.util.logging.LogRecord;
import java.util.regex.Pattern;
import org.graalvm.polyglot.Context;
import org.graalvm.polyglot.PolyglotException;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import com.oracle.truffle.api.CallTarget;
import com.oracle.truffle.api.CompilerDirectives;
import com.oracle.truffle.api.CompilerDirectives.TruffleBoundary;
import com.oracle.truffle.api.Truffle;
import com.oracle.truffle.api.TruffleLanguage;
import com.oracle.truffle.api.TruffleLogger;
import com.oracle.truffle.api.TruffleStackTrace;
import com.oracle.truffle.api.TruffleStackTraceElement;
import com.oracle.truffle.api.exception.AbstractTruffleException;
import com.oracle.truffle.api.frame.VirtualFrame;
import com.oracle.truffle.api.interop.ExceptionType;
import com.oracle.truffle.api.interop.InteropException;
import com.oracle.truffle.api.interop.InteropLibrary;
import com.oracle.truffle.api.interop.InvalidArrayIndexException;
import com.oracle.truffle.api.interop.TruffleObject;
import com.oracle.truffle.api.interop.UnsupportedMessageException;
import com.oracle.truffle.api.library.ExportLibrary;
import com.oracle.truffle.api.library.ExportMessage;
import com.oracle.truffle.api.nodes.ControlFlowException;
import com.oracle.truffle.api.nodes.DirectCallNode;
import com.oracle.truffle.api.nodes.ExplodeLoop;
import com.oracle.truffle.api.nodes.Node;
import com.oracle.truffle.api.nodes.RootNode;
import com.oracle.truffle.api.profiles.BranchProfile;
import com.oracle.truffle.api.source.SourceSection;
import com.oracle.truffle.api.test.polyglot.AbstractPolyglotTest;
import com.oracle.truffle.api.test.polyglot.ProxyLanguage;
import com.oracle.truffle.tck.tests.TruffleTestAssumptions;
public class TruffleExceptionTest extends AbstractPolyglotTest {
@BeforeClass
public static void runWithWeakEncapsulationOnly() {
TruffleTestAssumptions.assumeWeakEncapsulation();
}
private VerifyingHandler verifyingHandler;
@Before
public void setUp() {
verifyingHandler = new VerifyingHandler(AbstractTruffleException.class);
}
@Test
public void testTruffleException() {
setupEnv(createContext(verifyingHandler), new ProxyLanguage() {
@Override
protected CallTarget parse(TruffleLanguage.ParsingRequest request) throws Exception {
return createAST(AbstractTruffleException.class, languageInstance, (n) -> new TruffleExceptionImpl("Test exception", n), false);
}
});
verifyingHandler.expect(BlockNode.Kind.TRY, BlockNode.Kind.CATCH, BlockNode.Kind.FINALLY);
context.eval(ProxyLanguage.ID, "Test");
}
@Test
public void testTruffleExceptionCustomGuestObject() {
setupEnv(createContext(verifyingHandler), new ProxyLanguage() {
@Override
protected CallTarget parse(TruffleLanguage.ParsingRequest request) throws Exception {
return createAST(AbstractTruffleException.class, languageInstance, (n) -> new TruffleExceptionImpl("Test exception", n), true);
}
});
verifyingHandler.expect(BlockNode.Kind.TRY, BlockNode.Kind.CATCH, BlockNode.Kind.FINALLY);
context.eval(ProxyLanguage.ID, "Test");
}
@Test
public void testPolyglotStackTrace() {
testStackTraceImpl(new ProxyLanguage() {
@Override
protected CallTarget parse(TruffleLanguage.ParsingRequest request) throws Exception {
ThrowNode throwNode = new ThrowNode((n) -> {
return new TruffleExceptionImpl("Test exception", n);
});
return new TestRootNode(languageInstance, "test", null, throwNode).getCallTarget();
}
},
"<proxyLanguage> test",
"(org.graalvm.sdk/)?org.graalvm.polyglot.Context.eval");
}
@Test
public void testPolyglotStackTrace2() {
testStackTraceImpl(new ProxyLanguage() {
@Override
protected CallTarget parse(TruffleLanguage.ParsingRequest request) throws Exception {
ThrowNode throwNode = new ThrowNode((n) -> {
return new TruffleExceptionImpl("Test exception", n);
});
CallTarget throwTarget = new TestRootNode(languageInstance, "test-throw", null, throwNode).getCallTarget();
CallTarget innerInvokeTarget = new TestRootNode(languageInstance, "test-call-inner", null, new InvokeNode(throwTarget)).getCallTarget();
CallTarget outerInvokeTarget = new TestRootNode(languageInstance, "test-call-outer", null, new InvokeNode(innerInvokeTarget)).getCallTarget();
return outerInvokeTarget;
}
},
"<proxyLanguage> test-throw",
"<proxyLanguage> test-call-inner",
"<proxyLanguage> test-call-outer",
"(org.graalvm.sdk/)?org.graalvm.polyglot.Context.eval");
}
@Test
public void testPolyglotStackTraceInternalFrame() {
testStackTraceImpl(new ProxyLanguage() {
@Override
protected CallTarget parse(TruffleLanguage.ParsingRequest request) throws Exception {
ThrowNode throwNode = new ThrowNode((n) -> {
return new TruffleExceptionImpl("Test exception", n);
});
CallTarget throwTarget = new TestRootNode(languageInstance, "test-throw-internal", null, true, throwNode).getCallTarget();
CallTarget innerInvokeTarget = new TestRootNode(languageInstance, "test-call-inner", null, new InvokeNode(throwTarget)).getCallTarget();
CallTarget internalInvokeTarget = new TestRootNode(languageInstance, "test-call-internal", null, true, new InvokeNode(innerInvokeTarget)).getCallTarget();
CallTarget outerInvokeTarget = new TestRootNode(languageInstance, "test-call-outer", null, new InvokeNode(internalInvokeTarget)).getCallTarget();
return outerInvokeTarget;
}
},
"<proxyLanguage> test-call-inner",
"<proxyLanguage> test-call-outer",
"(org.graalvm.sdk/)?org.graalvm.polyglot.Context.eval");
}
@Test
public void testPolyglotStackTraceExplicitFillIn() {
testStackTraceImpl(new ProxyLanguage() {
@Override
protected CallTarget parse(TruffleLanguage.ParsingRequest request) throws Exception {
ThrowNode throwNode = new ThrowNode((n) -> {
TruffleExceptionImpl e = new TruffleExceptionImpl("Test exception", n);
TruffleStackTrace.fillIn(e);
return e;
});
return new TestRootNode(languageInstance, "test", null, throwNode).getCallTarget();
}
},
"<proxyLanguage> test",
"(org.graalvm.sdk/)?org.graalvm.polyglot.Context.eval");
}
@Test
public void testPolyglotStackTraceInternalError() {
testStackTraceImpl(new ProxyLanguage() {
@Override
protected CallTarget parse(TruffleLanguage.ParsingRequest request) throws Exception {
ThrowNode throwNode = new ThrowNode(new InternalExceptionFactory());
return new TestRootNode(languageInstance, "test", null, throwNode).getCallTarget();
}
},
Pattern.quote("com.oracle.truffle.api.test.TruffleExceptionTest$InternalExceptionFactory.apply"),
Pattern.quote("com.oracle.truffle.api.test.TruffleExceptionTest$ThrowNode.executeVoid"),
Pattern.quote("com.oracle.truffle.api.test.TruffleExceptionTest$TestRootNode.execute"),
"<proxyLanguage> test",
"(org.graalvm.sdk/)?org.graalvm.polyglot.Context.eval");
}
@Test
public void testExceptionFromCreateContext() {
String message = "Failed to create";
ExceptionType type = ExceptionType.EXIT;
assertFails(() -> setupEnv(Context.create(), new ProxyLanguage() {
@Override
protected LanguageContext createContext(Env env) {
throw new TruffleExceptionImpl(message, null, type, null);
}
}), PolyglotException.class, (pe) -> {
Assert.assertEquals(message, pe.getMessage());
Assert.assertTrue(pe.isExit());
Assert.assertFalse(pe.isInternalError());
Assert.assertEquals(0, pe.getExitStatus());
Assert.assertNull(pe.getGuestObject());
});
}
private void testStackTraceImpl(ProxyLanguage proxy, String... patterns) {
setupEnv(Context.create(), proxy);
assertFails(() -> context.eval(ProxyLanguage.ID, "Test"), PolyglotException.class, (pe) -> {
verifyStackTrace(pe, patterns);
});
}
static void verifyStackTrace(PolyglotException pe, String... patterns) {
StringWriter buffer = new StringWriter();
try (PrintWriter out = new PrintWriter(buffer)) {
pe.printStackTrace(out);
}
String[] lines = Arrays.stream(buffer.toString().split(System.lineSeparator())).map((l) -> l.trim()).filter((l) -> l.startsWith("at ")).map((l) -> {
int end = l.lastIndexOf('(');
if (end < 0) {
end = l.length();
}
return l.substring(3, end);
}).toArray((len) -> new String[len]);
Assert.assertTrue("Not enough lines " + Arrays.toString(lines), patterns.length <= lines.length);
for (int i = 0; i < lines.length && i < patterns.length; i++) {
String line = lines[i];
Pattern pattern = Pattern.compile(patterns[i]);
Assert.assertTrue("Expected " + patterns[i] + " but got " + line, pattern.matcher(line).matches());
}
}
@Test
public void testExceptionFromPolyglotExceptionConstructor() {
testExceptionFromPolyglotExceptionConstructorImpl(ExceptionType.RUNTIME_ERROR, false);
testExceptionFromPolyglotExceptionConstructorImpl(ExceptionType.RUNTIME_ERROR, true, TruffleExceptionImpl.MessageKind.IS_EXCEPTION);
testExceptionFromPolyglotExceptionConstructorImpl(ExceptionType.RUNTIME_ERROR, true, TruffleExceptionImpl.MessageKind.GET_EXCEPTION_TYPE);
testExceptionFromPolyglotExceptionConstructorImpl(ExceptionType.EXIT, true, TruffleExceptionImpl.MessageKind.GET_EXCEPTION_EXIT_STATUS);
testExceptionFromPolyglotExceptionConstructorImpl(ExceptionType.PARSE_ERROR, true, TruffleExceptionImpl.MessageKind.IS_EXCEPTION_INCOMPLETE_SOURCE);
testExceptionFromPolyglotExceptionConstructorImpl(ExceptionType.RUNTIME_ERROR, true, TruffleExceptionImpl.MessageKind.HAS_SOURCE_LOCATION);
testExceptionFromPolyglotExceptionConstructorImpl(ExceptionType.RUNTIME_ERROR, true, TruffleExceptionImpl.MessageKind.GET_SOURCE_LOCATION);
}
private void testExceptionFromPolyglotExceptionConstructorImpl(ExceptionType type, boolean internal, TruffleExceptionImpl.MessageKind... failOn) {
setupEnv(Context.create(), new ProxyLanguage() {
@Override
protected CallTarget parse(TruffleLanguage.ParsingRequest request) throws Exception {
ThrowNode throwNode = new ThrowNode((n) -> new TruffleExceptionImpl("test", n, type, new InjectException(failOn)));
return new TestRootNode(languageInstance, "test", "unnamed", throwNode).getCallTarget();
}
});
assertFails(() -> context.eval(ProxyLanguage.ID, "Test"), PolyglotException.class, (pe) -> {
Assert.assertEquals(internal, pe.isInternalError());
});
}
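    // Build a context that routes FINE-level records of the test logger into the verifying handler.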
static Context createContext(VerifyingHandler handler) {
return Context.newBuilder().option(String.format("log.%s.level", handler.loggerName), "FINE").logHandler(handler).build();
}
static CallTarget createAST(Class<?> testClass, TruffleLanguage<ProxyLanguage.LanguageContext> lang,
                    ExceptionFactory exceptionObjectFactory, boolean customStackTraceElementGuestObject) {
        ThrowNode throwNode = new ThrowNode(exceptionObjectFactory);
TryCatchNode tryCatch = new TryCatchNode(new BlockNode(testClass, BlockNode.Kind.TRY, throwNode),
new BlockNode(testClass, BlockNode.Kind.CATCH),
new BlockNode(testClass, BlockNode.Kind.FINALLY));
return new TestRootNode(lang, "test", customStackTraceElementGuestObject ? "unnamed" : null, tryCatch).getCallTarget();
}
@SuppressWarnings({"unchecked", "unused"})
static <T extends Throwable> T sthrow(Class<T> type, Throwable t) throws T {
throw (T) t;
}
static final class TestRootNode extends RootNode {
private final String name;
private final String ownerName;
private final boolean internal;
private final StackTraceElementGuestObject customStackTraceElementGuestObject;
@Child StatementNode body;
TestRootNode(TruffleLanguage<?> language, String name, String ownerName, StatementNode body) {
this(language, name, ownerName, false, body);
}
TestRootNode(TruffleLanguage<?> language, String name, String ownerName, boolean internal, StatementNode body) {
super(language);
this.name = name;
this.ownerName = ownerName;
this.internal = internal;
this.body = body;
this.customStackTraceElementGuestObject = ownerName != null ? new StackTraceElementGuestObject(name, ownerName) : null;
}
@Override
public String getQualifiedName() {
return ownerName != null ? ownerName + '.' + name : name;
}
@Override
public String getName() {
return name;
}
@Override
public Object execute(VirtualFrame frame) {
body.executeVoid(frame);
return true;
}
@Override
protected Object translateStackTraceElement(TruffleStackTraceElement element) {
if (customStackTraceElementGuestObject != null) {
return customStackTraceElementGuestObject;
} else {
return super.translateStackTraceElement(element);
}
}
@Override
public boolean isInternal() {
return internal;
}
}
@ExportLibrary(InteropLibrary.class)
static final class StackTraceElementGuestObject implements TruffleObject {
private final String name;
private final Object owner;
StackTraceElementGuestObject(String name, String ownerName) {
this.name = name;
this.owner = new OwnerMetaObject(ownerName);
}
@ExportMessage
@SuppressWarnings("static-method")
boolean hasExecutableName() {
return true;
}
@ExportMessage
Object getExecutableName() {
return name;
}
@ExportMessage
@SuppressWarnings("static-method")
boolean hasDeclaringMetaObject() {
return true;
}
@ExportMessage
Object getDeclaringMetaObject() {
return owner;
}
@ExportLibrary(InteropLibrary.class)
static final class OwnerMetaObject implements TruffleObject {
private final String name;
OwnerMetaObject(String name) {
this.name = name;
}
@ExportMessage
@SuppressWarnings("static-method")
boolean isMetaObject() {
return true;
}
@ExportMessage
@SuppressWarnings({"static-method", "unused"})
boolean isMetaInstance(Object object) {
return false;
}
@ExportMessage
Object getMetaQualifiedName() {
return name;
}
@ExportMessage
Object getMetaSimpleName() {
return name;
}
}
}
abstract static class StatementNode extends Node {
abstract void executeVoid(VirtualFrame frame);
}
static class BlockNode extends StatementNode {
enum Kind {
TRY,
CATCH,
FINALLY
}
@Children private StatementNode[] children;
BlockNode(Class<?> testClass, Kind kind, StatementNode... children) {
this.children = new StatementNode[children.length + 1];
this.children[0] = new LogNode(testClass, kind.name());
System.arraycopy(children, 0, this.children, 1, children.length);
}
@Override
@ExplodeLoop
void executeVoid(VirtualFrame frame) {
for (StatementNode child : children) {
child.executeVoid(frame);
}
}
}
private static class LogNode extends StatementNode {
private final TruffleLogger log;
private final String message;
LogNode(Class<?> testClass, String message) {
log = TruffleLogger.getLogger(ProxyLanguage.ID, testClass.getName());
this.message = message;
}
@Override
void executeVoid(VirtualFrame frame) {
log.fine(message);
}
}
private static final class TryCatchNode extends StatementNode {
@Child private BlockNode block;
@Child private BlockNode catchBlock;
@Child private BlockNode finallyBlock;
@Child private InteropLibrary exceptions = InteropLibrary.getFactory().createDispatched(5);
private final BranchProfile exceptionProfile = BranchProfile.create();
TryCatchNode(BlockNode block, BlockNode catchBlock, BlockNode finallyBlock) {
this.block = block;
this.catchBlock = catchBlock;
this.finallyBlock = finallyBlock;
}
@Override
void executeVoid(VirtualFrame frame) {
Throwable exception = null;
try {
block.executeVoid(frame);
} catch (Throwable ex) {
exception = executeCatchBlock(frame, ex, catchBlock);
}
// Java finally blocks that execute nodes are not allowed for
// compilation as code in finally blocks is duplicated
// by the Java bytecode compiler. This can lead to
// exponential code growth in worst cases.
if (finallyBlock != null) {
finallyBlock.executeVoid(frame);
}
if (exception != null) {
if (exception instanceof ControlFlowException) {
throw (ControlFlowException) exception;
}
try {
throw exceptions.throwException(exception);
} catch (UnsupportedMessageException ie) {
throw CompilerDirectives.shouldNotReachHere(ie);
}
}
}
@SuppressWarnings("unchecked")
private <T extends Throwable> Throwable executeCatchBlock(VirtualFrame frame, Throwable ex, BlockNode catchBlk) throws T {
if (ex instanceof ControlFlowException) {
// run finally blocks for control flow
return ex;
}
exceptionProfile.enter();
if (exceptions.isException(ex)) {
assertTruffleExceptionProperties(ex);
if (catchBlk != null) {
try {
catchBlk.executeVoid(frame);
return null;
} catch (Throwable catchEx) {
return executeCatchBlock(frame, catchEx, null);
}
} else {
// run finally blocks for any interop exception
return ex;
}
} else {
// do not run finally blocks for internal errors or unwinds
throw (T) ex;
}
}
@TruffleBoundary
private void assertTruffleExceptionProperties(Throwable ex) {
try {
Assert.assertEquals(ExceptionType.RUNTIME_ERROR, exceptions.getExceptionType(ex));
AbstractPolyglotTest.assertFails(() -> {
exceptions.getExceptionExitStatus(ex);
return null;
}, UnsupportedMessageException.class);
if (ex.getMessage() != null) {
Assert.assertTrue(exceptions.hasExceptionMessage(ex));
Assert.assertEquals(ex.getMessage(), exceptions.getExceptionMessage(ex));
} else {
Assert.assertFalse(exceptions.hasExceptionMessage(ex));
}
assertStackTrace(ex);
} catch (InteropException ie) {
CompilerDirectives.shouldNotReachHere(ie);
}
}
private void assertStackTrace(Throwable t) throws UnsupportedMessageException, InvalidArrayIndexException {
List<TruffleStackTraceElement> stack = TruffleStackTrace.getStackTrace(t);
Object stackGuestObject = exceptions.getExceptionStackTrace(t);
Assert.assertTrue(exceptions.hasArrayElements(stackGuestObject));
Assert.assertEquals(stack.size(), exceptions.getArraySize(stackGuestObject));<|fim▁hole|> verifyStackTraceElementGuestObject(stackTraceElementObject);
Assert.assertTrue(exceptions.hasExecutableName(stackTraceElementObject));
String executableName = exceptions.asString(exceptions.getExecutableName(stackTraceElementObject));
Assert.assertEquals(stack.get(i).getTarget().getRootNode().getName(), executableName);
String qualifiedName;
if (exceptions.hasDeclaringMetaObject(stackTraceElementObject)) {
qualifiedName = exceptions.asString(exceptions.getMetaQualifiedName(exceptions.getDeclaringMetaObject(stackTraceElementObject))) + '.' + executableName;
} else {
qualifiedName = executableName;
}
Assert.assertEquals(stack.get(i).getTarget().getRootNode().getQualifiedName(), qualifiedName);
}
}
}
interface ExceptionFactory {
Object apply(Node t);
}
static final class InternalExceptionFactory implements ExceptionFactory {
@Override
public Object apply(Node t) {
CompilerDirectives.transferToInterpreter();
throw new RuntimeException();
}
}
static class ThrowNode extends StatementNode {
private final ExceptionFactory exceptionObjectFactory;
@Child InteropLibrary interop;
        ThrowNode(ExceptionFactory exceptionObjectFactory) {
            this.exceptionObjectFactory = exceptionObjectFactory;
this.interop = InteropLibrary.getFactory().createDispatched(1);
}
@Override
void executeVoid(VirtualFrame frame) {
try {
throw interop.throwException(exceptionObjectFactory.apply(this));
} catch (UnsupportedMessageException um) {
throw CompilerDirectives.shouldNotReachHere(um);
}
}
}
static class InvokeNode extends StatementNode {
private final DirectCallNode call;
InvokeNode(CallTarget target) {
this.call = Truffle.getRuntime().createDirectCallNode(target);
}
@Override
void executeVoid(VirtualFrame frame) {
this.call.call();
}
}
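    // Test exception that can inject a RuntimeException into any interop
    // message (through the Consumer below), so failures inside the interop
    // protocol itself can be exercised.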
@SuppressWarnings("serial")
@ExportLibrary(InteropLibrary.class)
static final class TruffleExceptionImpl extends AbstractTruffleException {
enum MessageKind {
IS_EXCEPTION,
THROW_EXCEPTION,
GET_EXCEPTION_TYPE,
GET_EXCEPTION_EXIT_STATUS,
IS_EXCEPTION_INCOMPLETE_SOURCE,
HAS_SOURCE_LOCATION,
GET_SOURCE_LOCATION
}
private final ExceptionType exceptionType;
private final Consumer<MessageKind> exceptionInjection;
TruffleExceptionImpl(String message, Node location) {
this(message, location, ExceptionType.RUNTIME_ERROR, null);
}
TruffleExceptionImpl(
String message,
Node location,
ExceptionType exceptionType,
Consumer<MessageKind> exceptionInjection) {
super(message, location);
this.exceptionType = exceptionType;
this.exceptionInjection = exceptionInjection;
}
@ExportMessage
boolean isException() {
injectException(MessageKind.IS_EXCEPTION);
return true;
}
@ExportMessage
RuntimeException throwException() {
injectException(MessageKind.THROW_EXCEPTION);
throw this;
}
@ExportMessage
ExceptionType getExceptionType() {
injectException(MessageKind.GET_EXCEPTION_TYPE);
return exceptionType;
}
@ExportMessage
int getExceptionExitStatus() throws UnsupportedMessageException {
injectException(MessageKind.GET_EXCEPTION_EXIT_STATUS);
if (exceptionType != ExceptionType.EXIT) {
throw UnsupportedMessageException.create();
} else {
return 0;
}
}
@ExportMessage
boolean isExceptionIncompleteSource() throws UnsupportedMessageException {
injectException(MessageKind.IS_EXCEPTION_INCOMPLETE_SOURCE);
if (exceptionType != ExceptionType.PARSE_ERROR) {
throw UnsupportedMessageException.create();
} else {
return true;
}
}
@ExportMessage
boolean hasSourceLocation() {
injectException(MessageKind.HAS_SOURCE_LOCATION);
Node location = getLocation();
return location != null && location.getEncapsulatingSourceSection() != null;
}
@ExportMessage(name = "getSourceLocation")
SourceSection getSource() throws UnsupportedMessageException {
injectException(MessageKind.GET_SOURCE_LOCATION);
Node location = getLocation();
SourceSection section = location == null ? null : location.getEncapsulatingSourceSection();
if (section == null) {
throw UnsupportedMessageException.create();
} else {
return section;
}
}
@TruffleBoundary
private void injectException(MessageKind messageKind) {
if (exceptionInjection != null) {
exceptionInjection.accept(messageKind);
}
}
}
private static final class InjectException implements Consumer<TruffleExceptionImpl.MessageKind> {
private final Set<TruffleExceptionImpl.MessageKind> messages;
private InjectException(TruffleExceptionImpl.MessageKind... messages) {
this.messages = EnumSet.noneOf(TruffleExceptionImpl.MessageKind.class);
Collections.addAll(this.messages, messages);
}
@Override
public void accept(TruffleExceptionImpl.MessageKind kind) {
if (messages.contains(kind)) {
throw new RuntimeException();
}
}
}
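    // Log handler that asserts records arrive exactly in the order queued via
    // expect(); close() fails the test if any expected event was never published.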
static final class VerifyingHandler extends Handler {
final String loggerName;
private Queue<String> expected = new ArrayDeque<>();
VerifyingHandler(Class<?> testClass) {
loggerName = String.format("%s.%s", ProxyLanguage.ID, testClass.getName());
}
void expect(BlockNode.Kind... kinds) {
Arrays.stream(kinds).map(BlockNode.Kind::name).forEach(expected::add);
}
@Override
public void publish(LogRecord lr) {
if (loggerName.equals(lr.getLoggerName())) {
String head = expected.remove();
Assert.assertEquals(head, lr.getMessage());
}
}
@Override
public void flush() {
}
@Override
public void close() {
Assert.assertTrue("All expected events must be consumed. Remaining events: " + String.join(", ", expected), expected.isEmpty());
}
}
}<|fim▁end|> | for (int i = 0; i < stack.size(); i++) {
Object stackTraceElementObject = exceptions.readArrayElement(stackGuestObject, i); |
<|file_name|>test_case_operands.py<|end_file_name|><|fim▁begin|>import pytest, sys, os
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../")
from unittest import TestCase
from pylogic.case import Case
class TestBaseOperand(TestCase):
def test_eq_case(self):
case1 = Case("parent", "homer", "bart")<|fim▁hole|> def test_not_eq_case1(self):
case1 = Case("parent", "homer", "bart")
case2 = Case("parent", "homer", "lisa")
assert case1 != case2
def test_not_eq_case2(self):
case1 = Case("parent", "homer", "bart")
case2 = Case("brother", "homer", "lisa")
assert case1 != case2<|fim▁end|> | case2 = Case("parent", "homer", "bart")
assert case1 == case2
|
<|file_name|>git_cl_unittest.py<|end_file_name|><|fim▁begin|># Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from webkitpy.common.net.git_cl import GitCL
from webkitpy.common.system.executive_mock import MockExecutive2
from webkitpy.common.host_mock import MockHost
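# MockExecutive2 records every spawned command line in host.executive.calls,
# which is what the assertions below inspect instead of running real git.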
class GitCLTest(unittest.TestCase):
def test_run(self):
host = MockHost()
host.executive = MockExecutive2(output='mock-output')
git_cl = GitCL(host)
output = git_cl.run(['command'])
self.assertEqual(output, 'mock-output')
self.assertEqual(host.executive.calls, [['git', 'cl', 'command']])
def test_run_with_auth(self):
host = MockHost()
host.executive = MockExecutive2(output='mock-output')
git_cl = GitCL(host, auth_refresh_token_json='token.json')
git_cl.run(['upload'])
self.assertEqual(
host.executive.calls,
[['git', 'cl', 'upload', '--auth-refresh-token-json', 'token.json']])
def test_some_commands_not_run_with_auth(self):
host = MockHost()
host.executive = MockExecutive2(output='mock-output')
git_cl = GitCL(host, auth_refresh_token_json='token.json')
git_cl.run(['issue'])
self.assertEqual(host.executive.calls, [['git', 'cl', 'issue']])
def test_get_issue_number(self):
host = MockHost()
host.executive = MockExecutive2(output='Issue number: 12345 (http://crrev.com/12345)')
git_cl = GitCL(host)
self.assertEqual(git_cl.get_issue_number(), '12345')
def test_get_issue_number_none(self):
host = MockHost()
host.executive = MockExecutive2(output='Issue number: None (None)')
git_cl = GitCL(host)
self.assertEqual(git_cl.get_issue_number(), 'None')
def test_all_jobs_finished_empty(self):
self.assertTrue(GitCL.all_jobs_finished([]))<|fim▁hole|>
def test_all_jobs_finished_with_started_jobs(self):
self.assertFalse(GitCL.all_jobs_finished([
{
'builder_name': 'some-builder',
'status': 'COMPLETED',
'result': 'FAILURE',
},
{
'builder_name': 'some-builder',
'status': 'STARTED',
'result': None,
},
]))
def test_all_jobs_finished_only_completed_jobs(self):
self.assertTrue(GitCL.all_jobs_finished([
{
'builder_name': 'some-builder',
'status': 'COMPLETED',
'result': 'FAILURE',
},
{
'builder_name': 'some-builder',
'status': 'COMPLETED',
'result': 'SUCCESS',
},
]))
def test_has_failing_try_results_empty(self):
self.assertFalse(GitCL.has_failing_try_results([]))
def test_has_failing_try_results_only_success_and_started(self):
self.assertFalse(GitCL.has_failing_try_results([
{
'builder_name': 'some-builder',
'status': 'COMPLETED',
'result': 'SUCCESS',
},
{
'builder_name': 'some-builder',
'status': 'STARTED',
'result': None,
},
]))
def test_has_failing_try_results_with_failing_results(self):
self.assertTrue(GitCL.has_failing_try_results([
{
'builder_name': 'some-builder',
'status': 'COMPLETED',
'result': 'FAILURE',
},
]))<|fim▁end|> | |
<|file_name|>LuceneTestFixture.cpp<|end_file_name|><|fim▁begin|>/////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2009-2011 Alan Wright. All rights reserved.
// Distributable under the terms of either the Apache License (Version 2.0)
// or the GNU Lesser General Public License.
/////////////////////////////////////////////////////////////////////////////
#include "TestInc.h"
#include "LuceneTestFixture.h"
#include "ConcurrentMergeScheduler.h"
#include "DateTools.h"
namespace Lucene
{
LuceneTestFixture::LuceneTestFixture()
{
DateTools::setDateOrder(DateTools::DATEORDER_LOCALE);
ConcurrentMergeScheduler::setTestMode();
}
LuceneTestFixture::~LuceneTestFixture()
{<|fim▁hole|> {
// Clear the failure so that we don't just keep failing subsequent test cases
ConcurrentMergeScheduler::clearUnhandledExceptions();
BOOST_FAIL("ConcurrentMergeScheduler hit unhandled exceptions");
}
}
}<|fim▁end|> | DateTools::setDateOrder(DateTools::DATEORDER_LOCALE);
if (ConcurrentMergeScheduler::anyUnhandledExceptions()) |
<|file_name|>web_media_stream_audio_sink.cc<|end_file_name|><|fim▁begin|>// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/public/platform/modules/mediastream/web_media_stream_audio_sink.h"
#include "base/check.h"
#include "third_party/blink/public/platform/web_media_stream_source.h"
#include "third_party/blink/public/platform/web_media_stream_track.h"
#include "third_party/blink/renderer/platform/mediastream/media_stream_audio_track.h"
namespace blink {
void WebMediaStreamAudioSink::AddToAudioTrack(
WebMediaStreamAudioSink* sink,
const blink::WebMediaStreamTrack& track) {<|fim▁hole|> MediaStreamAudioTrack* native_track = MediaStreamAudioTrack::From(track);
DCHECK(native_track);
native_track->AddSink(sink);
}
void WebMediaStreamAudioSink::RemoveFromAudioTrack(
WebMediaStreamAudioSink* sink,
const blink::WebMediaStreamTrack& track) {
MediaStreamAudioTrack* native_track = MediaStreamAudioTrack::From(track);
DCHECK(native_track);
native_track->RemoveSink(sink);
}
media::AudioParameters WebMediaStreamAudioSink::GetFormatFromAudioTrack(
const blink::WebMediaStreamTrack& track) {
MediaStreamAudioTrack* native_track = MediaStreamAudioTrack::From(track);
DCHECK(native_track);
return native_track->GetOutputFormat();
}
} // namespace blink<|fim▁end|> | DCHECK(track.Source().GetType() == blink::WebMediaStreamSource::kTypeAudio); |
<|file_name|>ko.js<|end_file_name|><|fim▁begin|>/*
<|fim▁hole|>Copyright (c) 2003-2017, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
CKEDITOR.plugins.setLang( 'sourcearea', 'ko', {
toolbar: '소스'
} );<|fim▁end|> | |
<|file_name|>dataviz-spec.js<|end_file_name|><|fim▁begin|>/* globals: sinon */
var expect = require("chai").expect;
var Keen = require("../../../../src/core"),
keenHelper = require("../../helpers/test-config");
describe("Keen.Dataviz", function(){
beforeEach(function(){
this.project = new Keen({
projectId: keenHelper.projectId,
readKey: keenHelper.readKey
});
this.query = new Keen.Query("count", {
eventCollection: "test-collection"
});
this.dataviz = new Keen.Dataviz();
});
afterEach(function(){
this.project = null;
this.query = null;
this.dataviz = null;
    Keen.Dataviz.visuals = [];
});
describe("constructor", function(){
it("should create a new Keen.Dataviz instance", function(){
expect(this.dataviz).to.be.an.instanceof(Keen.Dataviz);
});
it("should contain a new Keen.Dataset instance", function(){
expect(this.dataviz.dataset).to.be.an.instanceof(Keen.Dataset);
});
it("should contain a view object", function(){
expect(this.dataviz.view).to.be.an("object");
});
it("should contain view attributes matching Keen.Dataviz.defaults", function(){
expect(this.dataviz.view.attributes).to.deep.equal(Keen.Dataviz.defaults);
});
it("should contain view defaults also matching Keen.Dataviz.defaults", function(){
expect(this.dataviz.view.defaults).to.deep.equal(Keen.Dataviz.defaults);
});
it("should be appended to Keen.Dataviz.visuals", function(){
expect(Keen.Dataviz.visuals).to.have.length(1);
expect(Keen.Dataviz.visuals[0]).and.to.be.an.instanceof(Keen.Dataviz);
});
});
describe("#attributes", function(){
it("should get the current properties", function(){
expect(this.dataviz.attributes()).to.deep.equal(Keen.Dataviz.defaults);
});
it("should set a hash of properties", function(){
this.dataviz.attributes({ title: "Updated Attributes", width: 600 });
expect(this.dataviz.view.attributes.title).to.be.a("string")
.and.to.eql("Updated Attributes");
expect(this.dataviz.view.attributes.width).to.be.a("number")
.and.to.eql(600);
});
it("should unset properties by passing null", function(){
      this.dataviz.attributes({ height: null });
      expect(this.dataviz.view.attributes.height).to.not.exist;
});
});
// it("should", function(){});
describe("#colors", function(){<|fim▁hole|> expect(this.dataviz.colors()).to.be.an("array")
.and.to.eql(Keen.Dataviz.defaults.colors);
});
it("should set a new array of colors", function(){
var array = ["red","green","blue"];
this.dataviz.colors(array);
expect(this.dataviz.colors()).to.be.an("array")
.and.to.have.length(3)
.and.to.eql(array);
});
it("should unset the colors set by passing null", function(){
var array = ["red","green","blue"];
this.dataviz.colors(array);
this.dataviz.colors(null);
expect(this.dataviz.colors()).to.not.exist;
});
});
describe("#colorMapping", function(){
it("should return undefined by default", function(){
expect(this.dataviz.colorMapping()).to.be.an("undefined");
});
it("should set and get a hash of properties", function(){
var hash = { "A": "#aaa", "B": "#bbb" };
this.dataviz.colorMapping(hash);
expect(this.dataviz.colorMapping()).to.be.an("object")
.and.to.deep.equal(hash);
});
it("should unset a property by passing null", function(){
var hash = { "A": "#aaa", "B": "#bbb" };
this.dataviz.colorMapping(hash);
expect(this.dataviz.colorMapping().A).to.be.a("string")
.and.to.eql("#aaa");
this.dataviz.colorMapping({ "A": null });
expect(this.dataviz.colorMapping().A).to.not.exist;
});
});
describe("#labels", function(){
it("should return an empty array by default", function(){
expect(this.dataviz.labels()).to.be.an("array").and.to.have.length(0);
});
it("should set and get a new array of labels", function(){
var array = ["A","B","C"];
this.dataviz.labels(array);
expect(this.dataviz.labels()).to.be.an("array")
.and.to.have.length(3)
.and.to.eql(array);
});
it("should unset the labels set by passing null", function(){
var array = ["A","B","C"];
this.dataviz.labels(array);
this.dataviz.labels(null);
expect(this.dataviz.labels()).to.be.an("array").and.to.have.length(0);
});
});
describe("#labelMapping", function(){
it("should return undefined by default", function(){
expect(this.dataviz.labelMapping()).to.be.an("undefined");
});
it("should set and get a hash of properties", function(){
var hash = { "_a_": "A", "_b_": "B" };
this.dataviz.labelMapping(hash);
expect(this.dataviz.labelMapping()).to.be.an("object")
.and.to.deep.equal(hash);
});
it("should unset a property by passing null", function(){
var hash = { "_a_": "A", "_b_": "B" };
this.dataviz.labelMapping(hash);
expect(this.dataviz.labelMapping()._a_).to.be.a("string")
.and.to.eql("A");
this.dataviz.labelMapping({ "_a_": null });
expect(this.dataviz.labelMapping()._a_).to.not.exist;
});
it("should provide full text replacement of categorical values", function(){
var num_viz = new Keen.Dataviz()
.call(function(){
this.dataset.output([
[ "Index", "Count" ],
[ "Sunday", 10 ],
[ "Monday", 11 ],
[ "Tuesday", 12 ],
[ "Wednesday", 13 ]
]);
this.dataset.meta.schema = { records: "result", select: true };
this.dataType("categorical");
})
.labelMapping({
"Sunday" : "Sun",
"Monday" : "Mon",
"Tuesday" : "Tues"
});
expect(num_viz.dataset.output()[1][0]).to.be.a("string")
.and.to.eql("Sun");
expect(num_viz.dataset.output()[2][0]).to.be.a("string")
.and.to.eql("Mon");
expect(num_viz.dataset.output()[3][0]).to.be.a("string")
.and.to.eql("Tues");
expect(num_viz.dataset.output()[4][0]).to.be.a("string")
.and.to.eql("Wednesday");
});
});
describe("#height", function(){
it("should return undefined by default", function(){
expect(this.dataviz.height()).to.be.an("undefined");
});
it("should set and get a new height", function(){
var height = 375;
this.dataviz.height(height);
expect(this.dataviz.height()).to.be.a("number")
.and.to.eql(height);
});
it("should unset the height by passing null", function(){
this.dataviz.height(null);
expect(this.dataviz.height()).to.not.exist;
});
});
describe("#title", function(){
it("should return undefined by default", function(){
expect(this.dataviz.title()).to.be.an("undefined");
});
it("should set and get a new title", function(){
var title = "New Title";
this.dataviz.title(title);
expect(this.dataviz.title()).to.be.a("string")
.and.to.eql(title);
});
it("should unset the title by passing null", function(){
this.dataviz.title(null);
expect(this.dataviz.title()).to.not.exist;
});
});
describe("#width", function(){
it("should return undefined by default", function(){
expect(this.dataviz.width()).to.be.an("undefined");
});
it("should set and get a new width", function(){
var width = 900;
this.dataviz.width(width);
expect(this.dataviz.width()).to.be.a("number")
.and.to.eql(width);
});
it("should unset the width by passing null", function(){
this.dataviz.width(null);
expect(this.dataviz.width()).to.not.exist;
});
});
describe("#adapter", function(){
it("should get the current adapter properties", function(){
expect(this.dataviz.adapter()).to.be.an("object")
.and.to.contain.keys("library", "chartType", "defaultChartType", "dataType");
expect(this.dataviz.adapter().library).to.be.an("undefined");
expect(this.dataviz.adapter().chartType).to.be.an("undefined");
});
it("should set a hash of properties", function(){
this.dataviz.adapter({ library: "lib2", chartType: "pie" });
expect(this.dataviz.view.adapter.library).to.be.a("string")
.and.to.eql("lib2");
expect(this.dataviz.view.adapter.chartType).to.be.a("string")
.and.to.eql("pie");
});
it("should unset properties by passing null", function(){
this.dataviz.adapter({ library: null });
expect(this.dataviz.view.adapter.library).to.not.exist;
});
});
describe("#library", function(){
it("should return undefined by default", function(){
expect(this.dataviz.library()).to.be.an("undefined");
});
it("should set and get a new library", function(){
var lib = "nvd3";
this.dataviz.library(lib);
expect(this.dataviz.library()).to.be.a("string")
.and.to.eql(lib);
});
it("should unset the library by passing null", function(){
this.dataviz.library(null);
expect(this.dataviz.library()).to.not.exist;
});
});
describe("#chartOptions", function(){
it("should set and get a hash of properties", function(){
var hash = { legend: { position: "none" }, isStacked: true };
this.dataviz.chartOptions(hash);
expect(this.dataviz.view.adapter.chartOptions.legend).to.be.an("object")
.and.to.deep.eql(hash.legend);
expect(this.dataviz.view.adapter.chartOptions.isStacked).to.be.a("boolean")
.and.to.eql(true);
});
it("should unset properties by passing null", function(){
var hash = { legend: { position: "none" }, isStacked: true };
this.dataviz.chartOptions(hash);
this.dataviz.chartOptions({ legend: null });
expect(this.dataviz.view.adapter.chartOptions.legend).to.not.exist;
});
});
describe("#chartType", function(){
it("should return undefined by default", function(){
expect(this.dataviz.chartType()).to.be.an("undefined");
});
it("should set and get a new chartType", function(){
var chartType = "magic-pie"
this.dataviz.chartType(chartType);
expect(this.dataviz.chartType()).to.be.a("string")
.and.to.eql(chartType);
});
it("should unset properties by passing null", function(){
this.dataviz.chartType(null);
expect(this.dataviz.chartType()).to.not.exist;
});
});
describe("#defaultChartType", function(){
it("should return undefined by default", function(){
expect(this.dataviz.defaultChartType()).to.be.an("undefined");
});
it("should set and get a new chartType", function(){
var defaultType = "backup-pie";
this.dataviz.defaultChartType(defaultType);
expect(this.dataviz.defaultChartType()).to.be.a("string")
.and.to.eql(defaultType);
});
it("should unset chartType by passing null", function(){
this.dataviz.defaultChartType(null);
expect(this.dataviz.defaultChartType()).to.not.exist;
});
});
describe("#dataType", function(){
it("should return undefined by default", function(){
expect(this.dataviz.dataType()).to.be.an("undefined");
});
it("should set and get a new dataType", function(){
var dataType = "cat-interval";
this.dataviz.dataType(dataType);
expect(this.dataviz.dataType()).to.be.a("string")
.and.to.eql(dataType);
});
it("should unset dataType by passing null", function(){
this.dataviz.dataType(null);
expect(this.dataviz.dataType()).to.not.exist;
});
});
describe("#el", function(){
beforeEach(function(){
var elDiv = document.createElement("div");
elDiv.id = "chart-test";
document.body.appendChild(elDiv);
});
it("should return undefined by default", function(){
expect(this.dataviz.el()).to.be.an("undefined");
});
it("should set and get a new el", function(){
this.dataviz.el(document.getElementById("chart-test"));
expect(this.dataviz.el()).to.be.an("object");
if (this.dataviz.el().nodeName) {
expect(this.dataviz.el().nodeName).to.be.a("string")
.and.to.eql("DIV");
}
});
it("should unset el by passing null", function(){
this.dataviz.el(null);
expect(this.dataviz.el()).to.not.exist;
});
});
describe("#indexBy", function(){
it("should return \"timeframe.start\" by default", function(){
expect(this.dataviz.indexBy()).to.be.a("string")
.and.to.eql("timeframe.start");
});
it("should set and get a new indexBy property", function(){
this.dataviz.indexBy("timeframe.end");
expect(this.dataviz.indexBy()).to.be.a("string")
.and.to.eql("timeframe.end");
});
it("should revert the property to default value by passing null", function(){
this.dataviz.indexBy(null);
expect(this.dataviz.indexBy()).to.be.a("string")
.and.to.eql(Keen.Dataviz.defaults.indexBy);
});
});
describe("#sortGroups", function(){
it("should return undefined by default", function(){
expect(this.dataviz.sortGroups()).to.be.an("undefined");
});
it("should set and get a new sortGroups property", function(){
this.dataviz.sortGroups("asc");
expect(this.dataviz.sortGroups()).to.be.a("string")
.and.to.eql("asc");
});
it("should unset property by passing null", function(){
this.dataviz.sortGroups(null);
expect(this.dataviz.sortGroups()).to.not.exist;
});
});
describe("#sortIntervals", function(){
it("should return undefined by default", function(){
expect(this.dataviz.sortIntervals()).to.be.an("undefined");
});
it("should set and get a new sortIntervals property", function(){
this.dataviz.sortIntervals("asc");
expect(this.dataviz.sortIntervals()).to.be.a("string")
.and.to.eql("asc");
});
it("should unset property by passing null", function(){
this.dataviz.sortIntervals(null);
expect(this.dataviz.sortIntervals()).to.not.exist;
});
});
describe("#stacked", function(){
it("should return false by default", function(){
expect(this.dataviz.stacked()).to.be.a("boolean").and.to.eql(false);
});
it("should set `stacked` to true by passing true", function(){
this.dataviz.stacked(true);
expect(this.dataviz.stacked()).to.be.a("boolean").and.to.eql(true);
});
it("should set `stacked` to false by passing null", function(){
this.dataviz.stacked(true);
this.dataviz.stacked(null);
expect(this.dataviz.stacked()).to.be.a("boolean").and.to.eql(false);
});
});
describe("#prepare", function(){
it("should set the view._prepared flag to true", function(){
expect(this.dataviz.view._prepared).to.be.false;
this.dataviz.el(document.getElementById("chart-test")).prepare();
expect(this.dataviz.view._prepared).to.be.true;
// terminate the spinner instance
this.dataviz.initialize();
});
});
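  // The stubbed "demo" adapter registered below uses sinon spies, so these
  // tests can assert which adapter hooks Dataviz invokes without loading a
  // real charting library.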
describe("Adapter actions", function(){
beforeEach(function(){
Keen.Dataviz.register("demo", {
"chart": {
initialize: sinon.spy(),
render: sinon.spy(),
update: sinon.spy(),
destroy: sinon.spy(),
error: sinon.spy()
}
});
this.dataviz.adapter({ library: "demo", chartType: "chart" });
});
describe("#initialize", function(){
it("should call the #initialize method of a given adapter", function(){
this.dataviz.initialize();
expect(Keen.Dataviz.libraries.demo.chart.initialize.called).to.be.ok;
});
it("should set the view._initialized flag to true", function(){
expect(this.dataviz.view._initialized).to.be.false;
this.dataviz.initialize();
expect(this.dataviz.view._initialized).to.be.true;
});
});
describe("#render", function(){
it("should call the #initialize method of a given adapter", function(){
this.dataviz.initialize();
expect(Keen.Dataviz.libraries.demo.chart.initialize.called).to.be.ok;
});
it("should call the #render method of a given adapter", function(){
this.dataviz.el(document.getElementById("chart-test")).render();
expect(Keen.Dataviz.libraries.demo.chart.render.called).to.be.ok;
});
it("should NOT call the #render method if el is NOT set", function(){
this.dataviz.render();
expect(Keen.Dataviz.libraries.demo.chart.render.called).to.not.be.ok;
});
it("should set the view._rendered flag to true", function(){
expect(this.dataviz.view._rendered).to.be.false;
this.dataviz.el(document.getElementById("chart-test")).render();
expect(this.dataviz.view._rendered).to.be.true;
});
});
describe("#update", function(){
it("should call the #update method of a given adapter if available", function(){
this.dataviz.update();
expect(Keen.Dataviz.libraries.demo.chart.update.called).to.be.ok;
});
it("should call the #render method of a given adapter if NOT available", function(){
Keen.Dataviz.libraries.demo.chart.update = void 0;
this.dataviz.el(document.getElementById("chart-test")).update();
expect(Keen.Dataviz.libraries.demo.chart.render.called).to.be.ok;
});
});
describe("#destroy", function(){
it("should call the #destroy method of a given adapter", function(){
this.dataviz.destroy();
expect(Keen.Dataviz.libraries.demo.chart.destroy.called).to.be.ok;
});
});
describe("#error", function(){
it("should call the #error method of a given adapter if available", function(){
this.dataviz.error();
expect(Keen.Dataviz.libraries.demo.chart.error.called).to.be.ok;
});
});
});
});<|fim▁end|> | it("should get the current color set", function(){ |
<|file_name|>malware_bazaar_search.py<|end_file_name|><|fim▁begin|>#python imports
import sys
import os
import time
import datetime
import subprocess
import json
import requests
from termcolor import colored<|fim▁hole|>
#third-party imports: requests and termcolor (imported above)
#programmer generated imports
from logger import logger
from fileio import fileio
'''
***BEGIN DESCRIPTION***
Type: Search - Description: Searches for any available data on a target against the Abuse.ch Malware Bazaar database.
***END DESCRIPTION***
'''
def POE(POE):
if (POE.logging == True):
LOG = logger()
newlogentry = ''
reputation_dump = ''
reputation_output_data = ''
malwarebazaar = ''
if (POE.logging == True):
newlogentry = 'Module: malware_bazaar_search'
LOG.WriteStrongLog(POE.logdir, POE.targetfilename, newlogentry)
if (POE.SHA256 == ''):
print (colored('\r\n[x] Unable to execute Malware Bazaar Search - hash value must be SHA256.', 'red', attrs=['bold']))
newlogentry = 'Unable to execute Malware Bazaar Search - hash value must be SHA256'
LOG.WriteStrongSubLog(POE.logdir, POE.targetfilename, newlogentry)
return -1
query_status = ''
first_seen = ''
last_seen = ''
signature = ''
sig_count = 0
output = POE.logdir + 'MalwareBazaarSearch.json'
FI = fileio()
print (colored('\r\n[*] Running abuse.ch Malware Bazaar Search against: ' + POE.target, 'white', attrs=['bold']))
malwarebazaar = "https://mb-api.abuse.ch/api/v1/" #API URL
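    # The Malware Bazaar API takes a POST with query=get_info plus the
    # sample's SHA-256 hash and answers with a JSON report.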
data = { #Our header params
'query': 'get_info',
'hash': POE.SHA256,
}
response_dump = requests.post(malwarebazaar, data=data, timeout=15) # Give us the results as JSON
if (POE.debug == True):
print (response_dump)
try:
FI.WriteLogFile(output, response_dump.content.decode("utf-8", "ignore"))
print (colored('[*] Malware Bazaar data had been written to file here: ', 'green') + colored(output, 'blue', attrs=['bold']))
if ((POE.logging == True) and (POE.nolinksummary == False)):
newlogentry = 'Malware Bazaar data has been generated to file here: <a href=\"' + output + '\"> Malware Bazaar Host Output </a>'
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
except:
print (colored('[x] Unable to write Malware Bazaar data to file', 'red', attrs=['bold']))
if (POE.logging == True):
newlogentry = 'Unable to write Malware Bazaar data to file'
LOG.WriteStrongSubLog(POE.logdir, POE.targetfilename, newlogentry)
POE.csv_line += 'N/A,'
return -1
try:
#Open the file we just downloaded
print ('[-] Reading Malware Bazaar file: ' + output.strip())
with open(output.strip(), 'rb') as read_file:
data = json.load(read_file, cls=None)
read_file.close()
# Check what kind of results we have
query_status = data["query_status"]
print ('[*] query_status: ' + query_status)
if (query_status == 'ok'):
with open(output.strip(), 'r') as read_file:
for string in read_file:
if (POE.debug == True):
print ('[DEBUG] string: ' + string.strip())
if ('first_seen' in string):
first_seen = string.strip()
if ('last_seen' in string):
last_seen = string.strip()
if (('signature' in string) and (sig_count == 0)):
signature = string.strip()
sig_count += 1
print ('[*] Sample ' + first_seen.replace(',',''))
print ('[*] Sample ' + last_seen.replace(',',''))
print ('[*] Sample ' + signature.replace(',',''))
if (POE.logging == True):
newlogentry = 'Sample ' + first_seen.replace(',','')
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
newlogentry = 'Sample ' + last_seen.replace(',','')
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
newlogentry = 'Sample ' + signature.replace(',','')
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
#Can't find anything on this one...
elif (query_status == 'hash_not_found'):
print (colored('[-] The hash value has not been found...', 'yellow', attrs=['bold']))
if (POE.logging == True):
newlogentry = 'No results available for host...'
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
#Can't find anything on this one...
elif (query_status == 'no_results'):
print (colored('[-] No results available for host...', 'yellow', attrs=['bold']))
if (POE.logging == True):
newlogentry = 'No results available for host...'
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
#Something weird happened...
else:
print (colored('[x] An error has occurred...', 'red', attrs=['bold']))
if (POE.logging == True):
newlogentry = 'An error has occurred...'
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
except Exception as e:
print (colored('[x] Error: ' + str(e) + ' Terminating...', 'red', attrs=['bold']))
read_file.close()
return -1
#Clean up before returning
read_file.close()
return 0<|fim▁end|> | |
<|file_name|>plusminus.py<|end_file_name|><|fim▁begin|># Given code
n = int(input().strip())
arr = [int(arr_temp) for arr_temp in input().strip().split(' ')]
# Start
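# Single pass over the array, counting as floats so the ratios below
# print with six decimal places.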
pos = 0.0
zero = 0.0
neg = 0.0
for i in arr:
if i == 0:
zero += 1<|fim▁hole|> elif i > 0:
pos += 1
else:
neg += 1
print("%.6f" % (pos / n))
print("%.6f" % (neg / n))
print("%.6f" % (zero / n))<|fim▁end|> | |
<|file_name|>realtime.js<|end_file_name|><|fim▁begin|>$(function() {
// When we're using HTTPS, use WSS too.
$('#all_messages').scrollTop($('#all_messages')[0].scrollHeight);
var to_focus = $("#message");
var ws_scheme = window.location.protocol == "https:" ? "wss" : "ws";
var chatsock = new ReconnectingWebSocket(ws_scheme + '://' + window.location.host + "/ws/");
chatsock.onmessage = function(message) {
if($("#no_messages").length){
$("#no_messages").remove();
}
var data = JSON.parse(message.data);
if(data.type == "presence"){
//update lurkers count
            var lurkers = data.payload.lurkers;
            var lurkers_ele = document.getElementById("lurkers-count");
            lurkers_ele.innerText = lurkers;
<|fim▁hole|> document.getElementById("loggedin-users-count").innerText = user_list.length;
user_list_obj = document.getElementById("user-list");
user_list_obj.innerText = "";
//alert(user_list);
for(var i = 0; i < user_list.length; i++ ){
var user_ele = document.createElement('li');
user_ele.setAttribute('class', 'list-group-item');
user_ele.innerText = user_list[i];
user_list_obj.append(user_ele);
}
return;
}
var chat = $("#chat")
var ele = $('<li class="list-group-item"></li>')
ele.append(
'<strong>'+data.user+'</strong> : ')
ele.append(
data.message)
chat.append(ele)
$('#all_messages').scrollTop($('#all_messages')[0].scrollHeight);
};
$("#chatform").on("submit", function(event) {
var message = {
message: $('#message').val()
}
chatsock.send(JSON.stringify(message));
$("#message").val('').focus();
return false;
});
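    // Send a heartbeat every 10 seconds, presumably so the server-side
    // presence tracking can tell live connections from stale ones.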
setInterval(function() {
chatsock.send(JSON.stringify("heartbeat"));
}, 10000);
});<|fim▁end|> | //update logged in users list
user_list = data.payload.members; |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|>"""Django module for the OS2datascanner project."""<|fim▁end|> | |
<|file_name|>exceptions.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | class PayPalFailure(Exception): pass |
<|file_name|>framedImage.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3
# coding: utf-8
import gi
gi.require_version('CinnamonDesktop', '3.0')
from gi.repository import Gtk, GdkPixbuf, Gio, GLib, GObject, Gdk
from util import utils, trackers
MAX_IMAGE_SIZE = 320
MAX_IMAGE_SIZE_LOW_RES = 200
class FramedImage(Gtk.Image):
"""
Widget to hold the user face image. It attempts to display an image at
its native size, up to a max sane size.
"""
__gsignals__ = {
"surface-changed": (GObject.SignalFlags.RUN_LAST, None, (object,))
}
def __init__(self, low_res=False, scale_up=False):
super(FramedImage, self).__init__()
self.get_style_context().add_class("framedimage")
self.cancellable = None
self.file = None
self.path = None
self.scale_up = scale_up
if low_res:
self.max_size = MAX_IMAGE_SIZE_LOW_RES
else:
self.max_size = MAX_IMAGE_SIZE
trackers.con_tracker_get().connect(self, "realize", self.on_realized)
def on_realized(self, widget):
self.generate_image()
def clear_image(self):
self.set_from_surface(None)
self.emit("surface-changed", None)
def set_from_path(self, path):
self.path = path
self.file = None
if self.get_realized():
self.generate_image()
def set_from_file(self, file):
self.file = file
self.path = None
if self.get_realized():
self.generate_image()
def set_image_internal(self, path):<|fim▁hole|> try:
pixbuf = GdkPixbuf.Pixbuf.new_from_file(path)
except GLib.Error as e:
message = "Could not load pixbuf from '%s' for FramedImage: %s" % (path, e.message)
error = True
        if pixbuf is not None:
if (pixbuf.get_height() > scaled_max_size or pixbuf.get_width() > scaled_max_size) or \
(self.scale_up and (pixbuf.get_height() < scaled_max_size / 2 or pixbuf.get_width() < scaled_max_size / 2)):
try:
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(path, scaled_max_size, scaled_max_size)
except GLib.Error as e:
message = "Could not scale pixbuf from '%s' for FramedImage: %s" % (path, e.message)
error = True
if pixbuf:
surface = Gdk.cairo_surface_create_from_pixbuf(pixbuf,
self.get_scale_factor(),
self.get_window())
self.set_from_surface(surface)
self.emit("surface-changed", surface)
else:
print(message)
self.clear_image()
def generate_image(self):
if self.path:
self.set_image_internal(self.path)
elif self.file:
            if self.cancellable is not None:
self.cancellable.cancel()
self.cancellable = None
self.cancellable = Gio.Cancellable()
self.file.load_contents_async(self.cancellable, self.load_contents_async_callback)
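    # The async callbacks below stream the remote file into a temporary cache
    # file, then load the pixbuf from that local path.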
def load_contents_async_callback(self, file, result, data=None):
try:
success, contents, etag_out = file.load_contents_finish(result)
except GLib.Error:
self.clear_image()
return
if contents:
cache_name = GLib.build_filenamev([GLib.get_user_cache_dir(), "cinnamon-screensaver-albumart-temp"])
cache_file = Gio.File.new_for_path(cache_name)
cache_file.replace_contents_async(contents,
None,
False,
Gio.FileCreateFlags.REPLACE_DESTINATION,
self.cancellable,
self.on_file_written)
def on_file_written(self, file, result, data=None):
try:
if file.replace_contents_finish(result):
self.set_image_internal(file.get_path())
except GLib.Error:
pass<|fim▁end|> | pixbuf = None
scaled_max_size = self.max_size * self.get_scale_factor()
|
<|file_name|>configuration.test.js<|end_file_name|><|fim▁begin|>'use strict';
const chai = require('chai'),
expect = chai.expect,
config = require('../config/config'),
Support = require('./support'),
dialect = Support.getTestDialect(),
Sequelize = Support.Sequelize,
fs = require('fs'),
path = require('path');
if (dialect === 'sqlite') {
var sqlite3 = require('sqlite3'); // eslint-disable-line
}
describe(Support.getTestDialectTeaser('Configuration'), () => {
describe('Connections problems should fail with a nice message', () => {
it('when we don\'t have the correct server details', () => {
const seq = new Sequelize(config[dialect].database, config[dialect].username, config[dialect].password, { storage: '/path/to/no/where/land', logging: false, host: '0.0.0.1', port: config[dialect].port, dialect });
if (dialect === 'sqlite') {
// SQLite doesn't have a breakdown of error codes, so we are unable to discern between the different types of errors.
return expect(seq.query('select 1 as hello')).to.eventually.be.rejectedWith(Sequelize.ConnectionError, 'SQLITE_CANTOPEN: unable to open database file');
}
return expect(seq.query('select 1 as hello')).to.eventually.be.rejectedWith([Sequelize.HostNotReachableError, Sequelize.InvalidConnectionError]);
});
it('when we don\'t have the correct login information', () => {
if (dialect === 'mssql') {
// NOTE: Travis seems to be having trouble with this test against the
// AWS instance. Works perfectly fine on a local setup.
expect(true).to.be.true;
return;
}
const seq = new Sequelize(config[dialect].database, config[dialect].username, 'fakepass123', { logging: false, host: config[dialect].host, port: 1, dialect });
if (dialect === 'sqlite') {
// SQLite doesn't require authentication and `select 1 as hello` is a valid query, so this should be fulfilled not rejected for it.
return expect(seq.query('select 1 as hello')).to.eventually.be.fulfilled;
}
return expect(seq.query('select 1 as hello')).to.eventually.be.rejectedWith(Sequelize.ConnectionRefusedError, 'connect ECONNREFUSED');
});
it('when we don\'t have a valid dialect.', () => {
expect(() => {
new Sequelize(config[dialect].database, config[dialect].username, config[dialect].password, { host: '0.0.0.1', port: config[dialect].port, dialect: 'some-fancy-dialect' });
}).to.throw(Error, 'The dialect some-fancy-dialect is not supported. Supported dialects: mssql, mariadb, mysql, postgres, and sqlite.');
});
});
describe('Instantiation with arguments', () => {
if (dialect === 'sqlite') {
it('should respect READONLY / READWRITE connection modes', () => {
const p = path.join(__dirname, '../tmp', 'foo.sqlite');
const createTableFoo = 'CREATE TABLE foo (faz TEXT);';
const createTableBar = 'CREATE TABLE bar (baz TEXT);';
const testAccess = Sequelize.Promise.method(() => {
return Sequelize.Promise.promisify(fs.access)(p, fs.R_OK | fs.W_OK);
});
return Sequelize.Promise.promisify(fs.unlink)(p)
.catch(err => {
expect(err.code).to.equal('ENOENT');
})
.then(() => {
const sequelizeReadOnly = new Sequelize('sqlite://foo', {
storage: p,
dialectOptions: {
mode: sqlite3.OPEN_READONLY
}
});
const sequelizeReadWrite = new Sequelize('sqlite://foo', {
storage: p,
dialectOptions: {
mode: sqlite3.OPEN_READWRITE
}
});
expect(sequelizeReadOnly.config.dialectOptions.mode).to.equal(sqlite3.OPEN_READONLY);
expect(sequelizeReadWrite.config.dialectOptions.mode).to.equal(sqlite3.OPEN_READWRITE);
return Sequelize.Promise.join(
sequelizeReadOnly.query(createTableFoo)
.should.be.rejectedWith(Error, 'SQLITE_CANTOPEN: unable to open database file'),
sequelizeReadWrite.query(createTableFoo)
.should.be.rejectedWith(Error, 'SQLITE_CANTOPEN: unable to open database file')
);
})
.then(() => {
// By default, sqlite creates a connection that's READWRITE | CREATE
const sequelize = new Sequelize('sqlite://foo', {
storage: p
});
return sequelize.query(createTableFoo);
})
.then(testAccess)
.then(() => {
const sequelizeReadOnly = new Sequelize('sqlite://foo', {
storage: p,
dialectOptions: {
mode: sqlite3.OPEN_READONLY
}
});
const sequelizeReadWrite = new Sequelize('sqlite://foo', {
storage: p,
dialectOptions: {
mode: sqlite3.OPEN_READWRITE
}
});
return Sequelize.Promise.join(
sequelizeReadOnly.query(createTableBar)
.should.be.rejectedWith(Error, 'SQLITE_READONLY: attempt to write a readonly database'),
sequelizeReadWrite.query(createTableBar)
);
})
.finally(() => {<|fim▁hole|> }
});
});<|fim▁end|> | return Sequelize.Promise.promisify(fs.unlink)(p);
});
}); |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.utils import timezone
from django.contrib import admin
from packages.generic import gmodels
from packages.generic.gmodels import content_file_name,content_file_name_same
from datetime import datetime
from django.core.validators import MaxValueValidator, MinValueValidator
from django.conf import settings as stg<|fim▁hole|>
from embed_video.fields import EmbedVideoField
# Create your models here.
class Conference(models.Model):
title = models.CharField(max_length=160)
def __str__(self):
return self.title
class Category(models.Model):
title = models.CharField(max_length=160)
position = models.PositiveIntegerField(default='0')
class Meta:
verbose_name_plural = 'categories'
def __str__(self):
return self.title
def save(self, *args, **kwargs):
model = self.__class__
if self.position is None:
# Append
try:
last = model.objects.order_by('-position')[0]
self.position = last.position + 1
except IndexError:
# First row
self.position = 0
return super(Category, self).save(*args, **kwargs)
class Code(models.Model):
title = models.CharField(max_length=250)
file = models.FileField(upload_to=content_file_name_same,blank=True)
git_link = models.URLField(blank=True)
programming_language = models.CharField(max_length=40)
details = models.TextField(max_length=600,blank=True)
def __str__(self):
return str(self.title)
class Publication(models.Model):
title = models.CharField(max_length=160)
authors = models.CharField(max_length=220,null=True)
link = models.URLField(null=True, blank=True)
file = models.FileField(upload_to=content_file_name_same, null=True, blank=True)
short = models.CharField(max_length=50,null=True)
bibtex = models.TextField(max_length=1000)
conference_id = models.ForeignKey(Conference)
year = models.PositiveIntegerField(default=datetime.now().year,
validators=[
MaxValueValidator(datetime.now().year + 2),
MinValueValidator(1800)
])
def __str__(self):
return self.title
def fullStr(self):
return "%s, \"%s\", %s, %s " % (self.authors, self.title, self.conference_id.title, self.year)
class Project(models.Model):
title = models.CharField(max_length=200)
text = models.TextField(max_length=500)
mtext = models.TextField(max_length=1000,blank=True)
date_created = models.DateTimeField(default=timezone.now)
category_id = models.ForeignKey(Category)
position = models.PositiveIntegerField(default='0')
    publications = models.ManyToManyField(Publication, blank=True)  # null has no effect on ManyToManyField
def save(self, *args, **kwargs):
model = self.__class__
if self.position is None:
# Append
try:
last = model.objects.order_by('-position')[0]
self.position = last.position + 1
except IndexError:
# First row
self.position = 0
return super(Project, self).save(*args, **kwargs)
def get_images(self):
return [y for y in ProjectImage.objects.filter(entity_id_id__exact=self.id)]
def getFirstImage(self):
try:
p = ProjectImage.objects.filter(entity_id_id__exact=self.id)[0]
except IndexError:
p = None
if None != p:
return p
else:
return "default.png"
def get_videos(self):
return [str(y) for y in ProjectVideo.objects.filter(entity_id_id__exact=self.id)]
def get_publications(self):
return [p for p in self.publications.all()]
class Meta:
ordering = ('position',)
def __str__(self):
return self.title
class ProjectImage(gmodels.GImage):
entity_id = models.ForeignKey(Project)
class ProjectVideo(models.Model):
entity_id = models.ForeignKey(Project)
link = EmbedVideoField(null=True) # same like models.URLField()
def __str__(self):
return str(self.link)
class ProjectImageInline(admin.TabularInline):
model = ProjectImage
extra = 1
readonly_fields = ('image_tag',)
class ProjectVideoInline(admin.TabularInline):
model = ProjectVideo
extra = 1
class ProjectAdmin(admin.ModelAdmin):
inlines = [ProjectImageInline, ProjectVideoInline, ]
class Media:
js = ('admin/js/listreorder.js',)
list_display = ('position',)
list_display_links = ('title',)
list_display = ('title', 'position',)
list_editable = ('position',)
class Article(models.Model):
title = models.CharField(max_length=200)
text = models.TextField(max_length=500)
date_created = models.DateTimeField(default=timezone.now)
category_id = models.ForeignKey(Category)
position = models.PositiveIntegerField(default='0')
def save(self, *args, **kwargs):
model = self.__class__
if self.position is None:
# Append
try:
last = model.objects.order_by('-position')[0]
self.position = last.position + 1
except IndexError:
# First row
self.position = 0
return super(Article, self).save(*args, **kwargs)
def get_images(self):
return [y for y in ArticleImage.objects.filter(entity_id_id__exact=self.id)]
def getFirstImage(self):
try:
p = ArticleImage.objects.filter(entity_id_id__exact=self.id)[0]
except IndexError:
p = None
if None != p:
return p
else:
return "default.png"
class Meta:
ordering = ('position',)
def __str__(self):
return self.title
class ArticleImage(gmodels.GImage):
entity_id = models.ForeignKey(Article)
class ArticleImageInline(admin.TabularInline):
model = ArticleImage
extra = 1
readonly_fields = ('image_tag',)
class ArticleAdmin(admin.ModelAdmin):
inlines = [ArticleImageInline, ]
class Media:
js = ('admin/js/listreorder.js',)
list_display = ('position',)
list_display_links = ('title',)
list_display = ('title', 'position',)
list_editable = ('position',)
class CodeSnippet(models.Model):
title = models.CharField(max_length=160)
programming_language = models.CharField(max_length=120)
text = models.TextField(max_length=500)
code = models.TextField(max_length=1000)
date_created = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.title<|fim▁end|> | import os
import Image as PImage
|
<|file_name|>utils.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Wed Aug 19 17:08:36 2015
@author: jgimenez
"""
from PyQt4 import QtGui, QtCore
import os
import time
import subprocess
types = {}
types['p'] = 'scalar'
types['U'] = 'vector'
types['p_rgh'] = 'scalar'
types['k'] = 'scalar'
types['epsilon'] = 'scalar'
types['omega'] = 'scalar'
types['alpha'] = 'scalar'
types['nut'] = 'scalar'
types['nuTilda'] = 'scalar'
types['nuSgs'] = 'scalar'
unknowns = ['U','p','p_rgh','alpha','k','nuSgs','epsilon','omega','nuTilda','nut']
def drange(start, stop, step):
r = start
while r < stop:
yield r
r += step
def command_window(palette):
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
def currentFields(currentFolder,filterTurb=True,nproc=1):
    #look up the fields present in the initial time directory
timedir = 0
currtime = 0
logname = '%s/dirFeatures.log'%currentFolder
logerrorname = '%s/error.log'%currentFolder
#print 'nproc: %s'%nproc
if nproc<=1:
command = 'dirFeaturesFoam -case %s 1> %s 2> %s' % (currentFolder,logname,logerrorname)
else:
command = 'mpirun -np %s dirFeaturesFoam -case %s -parallel 1> %s 2> %s' % (nproc,currentFolder,logname,logerrorname)
#print 'command: %s'%command
#p = subprocess.Popen([command],shell=True)
#p.wait()
os.system(command)
log = open(logname, 'r')
for linea in log:
if "Current Time" in linea:
currtime = linea.split('=')[1].strip()
timedir = '%s/%s'%(currentFolder,currtime)
from PyFoam.RunDictionary.ParsedParameterFile import ParsedParameterFile
    #Load all the fields and check which ones are going to be used (depending on the turbulence model)
allturb = ['k','epsilon','omega','nuSgs','nut','nuTilda']
    #keep only the ones that will actually be used
filename = '%s/constant/turbulenceProperties'%currentFolder
    read_ok = True
    try:
        tprop = ParsedParameterFile(filename,createZipped=False)
    except IOError:
        tprop = {}
        read_ok = False
    if read_ok:
if tprop['simulationType']=='RASModel':
filename = '%s/constant/RASProperties'%currentFolder
Rprop = ParsedParameterFile(filename,createZipped=False)
if Rprop['RASModel']=='kEpsilon':
allturb.remove('k')
allturb.remove('epsilon')
if Rprop['RASModel']=='kOmega' or Rprop['RASModel']=='kOmegaSST':
allturb.remove('k')
allturb.remove('omega')
elif tprop['simulationType']=='LESModel':
filename = '%s/constant/LESProperties'%currentFolder
Lprop = ParsedParameterFile(filename,createZipped=False)
if Lprop['LESModel']=='Smagorinsky':
allturb.remove('nuSgs')
NO_FIELDS = ['T0', 'T1', 'T2', 'T3', 'T4', 'nonOrth', 'skew']
if filterTurb:
for it in allturb:
NO_FIELDS.append(it)
command = 'rm -f %s/*~ %s/*.old'%(timedir,timedir)
os.system(command)
while not os.path.isfile(logname):
continue
    # This fallback exists because the case sometimes gets reset and the
    # currentTime folder is deleted; the 0 folder, on the other hand, is
    # always guaranteed to be present.
if not os.path.isdir(str(timedir)):
timedir = '%s/0'%currentFolder
fields = [ f for f in os.listdir(timedir) if (f not in NO_FIELDS and f in unknowns) ]
return [timedir,fields,currtime]
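# Usage sketch (hypothetical case folder; with nproc=1 dirFeaturesFoam is run
# without mpirun):
#   timedir, fields, currtime = currentFields('/tmp/myCase', nproc=1)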
def backupFile(f):
filename = f
if os.path.isfile(filename) and os.path.getsize(filename) > 0:
newfilepath = filename+'.backup'
command = 'cp %s %s'%(filename,newfilepath)
os.system(command)<|fim▁hole|> output = subprocess.Popen('xrandr | grep "\*" | cut -d" " -f4',shell=True, stdout=subprocess.PIPE).communicate()[0]
resolution = output.split()[0].split(b'x')
return resolution<|fim▁end|> |
def get_screen_resolutions(): |
<|file_name|>topology.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import ipaddress
__all__ = [
'config_to_map',
'get_region'
]
def config_to_map(topology_config):
"""
args:
topology_config: dict
{
'region1': [
'10.1.1.0/24',
'10.1.10.0/24',
'172.16.1.0/24'
],
'region2': [
'192.168.1.0/24',
'10.2.0.0/16',
]
}
Region cannot be "_default"
returns:
topology_map: dict
{
ip_network('10.1.1.0/24'): 'region1',
ip_network('10.1.10.0/24'): 'region1',
ip_network('172.16.1.0/24'): 'region1',
ip_network('192.168.1.0/24'): 'region2',
ip_network('10.2.0.0/16'): 'region2',
}
raises:
ValueError: if a region value is "_default"
"""
topology_map = {}
for region in topology_config:
# "_default" cannot be used as a region name
if region == '_default':
raise ValueError('cannot use "_default" as a region name')
for net_str in topology_config[region]:
net = ipaddress.ip_network(net_str)
topology_map[net] = region
return topology_map
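# Minimal sketch with hypothetical regions/networks showing how the two
# functions exported in __all__ compose:
#   topo_map = config_to_map({'east': ['10.0.0.0/8'], 'west': ['10.0.1.0/24']})
#   get_region('10.0.1.7', topo_map)  # -> 'west', the longest-prefix match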
def get_region(ip_str, topology_map):
"""Return name of a region from the topology map for
the given IP address, if multiple networks contain the IP,
region of the most specific(longest prefix length) match is returned,
if multiple equal prefix length found the behavior of which
entry is returned is undefined.
args:
ip_str: string representing an IP address
returns:
string: region name
None: if no region has been found
<|fim▁hole|> ip = ipaddress.ip_address(ip_str)
# find all the matching networks
matches = []
for net in topology_map:
if ip in net:
matches.append(net)
# if only a single match is found return it
if len(matches) == 1:
return topology_map[matches[0]]
# if more than 1 match is found, sort the matches
# by prefixlen, return the longest prefixlen entry
elif len(matches) > 1:
matches.sort(key=lambda net: net.prefixlen)
return topology_map[matches[-1]]
# no matches found
return None<|fim▁end|> | raises:
ValueError: raised by ipaddress if ip_str isn't a valid IP address
""" |
<|file_name|>liste.py<|end_file_name|><|fim▁begin|># -*-coding:Utf-8 -*
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le paramètre 'liste' de la commande 'matelot'."""
from primaires.format.fonctions import supprimer_accents
from primaires.format.tableau import Tableau
from primaires.interpreteur.masque.parametre import Parametre
from secondaires.navigation.equipage.postes.hierarchie import ORDRE
class PrmListe(Parametre):
"""Commande 'matelot liste'.
"""
def __init__(self):
"""Constructeur du paramètre"""
Parametre.__init__(self, "liste", "list")
self.tronquer = True
self.aide_courte = "liste les matelots de l'équipage"
self.aide_longue = \
"Cette commande liste les matelots de votre équipage. " \
"Elle permet d'obtenir rapidement des informations pratiques " \
"sur le nom du matelot ainsi que l'endroit où il se trouve."
def interpreter(self, personnage, dic_masques):<|fim▁hole|> """Interprétation du paramètre"""
salle = personnage.salle
if not hasattr(salle, "navire"):
personnage << "|err|Vous n'êtes pas sur un navire.|ff|"
return
navire = salle.navire
equipage = navire.equipage
if not navire.a_le_droit(personnage, "officier"):
personnage << "|err|Vous ne pouvez donner d'ordre sur ce " \
"navire.|ff|"
return
matelots = tuple((m, m.nom_poste) for m in \
equipage.matelots.values())
matelots += tuple(equipage.joueurs.items())
matelots = sorted(matelots, \
key=lambda couple: ORDRE.index(couple[1]), reverse=True)
if len(matelots) == 0:
personnage << "|err|Votre équipage ne comprend aucun matelot.|ff|"
return
tableau = Tableau()
tableau.ajouter_colonne("Nom")
tableau.ajouter_colonne("Poste")
tableau.ajouter_colonne("Affectation")
for matelot, nom_poste in matelots:
nom = matelot.nom
nom_poste = nom_poste.capitalize()
titre = "Aucune"
if hasattr(matelot, "personnage"):
titre = matelot.personnage.salle.titre_court.capitalize()
tableau.ajouter_ligne(nom, nom_poste, titre)
personnage << tableau.afficher()<|fim▁end|> | |
<|file_name|>test_passthrough.py<|end_file_name|><|fim▁begin|># #!/usr/bin/env python
# -*- coding: utf-8 -*-
# <HTTPretty - HTTP client mock for Python>
# Copyright (C) <2011-2018> Gabriel Falcão <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import requests
from unittest import skip
from sure import expect
from httpretty import HTTPretty
@skip
def test_http_passthrough():
url = 'http://httpbin.org/status/200'
response1 = requests.get(url)
response1 = requests.get(url, stream=True)
HTTPretty.enable()
HTTPretty.register_uri(HTTPretty.GET, 'http://google.com/', body="Not Google")
response2 = requests.get('http://google.com/')
expect(response2.content).to.equal(b'Not Google')
response3 = requests.get(url, stream=True)
(response3.content).should.equal(response1.content)
HTTPretty.disable()
response4 = requests.get(url, stream=True)<|fim▁hole|>@skip
def test_https_passthrough():
url = 'https://raw.githubusercontent.com/gabrielfalcao/HTTPretty/master/COPYING'
response1 = requests.get(url, stream=True)
HTTPretty.enable()
HTTPretty.register_uri(HTTPretty.GET, 'https://google.com/', body="Not Google")
response2 = requests.get('https://google.com/')
expect(response2.content).to.equal(b'Not Google')
response3 = requests.get(url, stream=True)
(response3.content).should.equal(response1.content)
HTTPretty.disable()
response4 = requests.get(url, stream=True)
(response4.content).should.equal(response1.content)<|fim▁end|> | (response4.content).should.equal(response1.content)
|
<|file_name|>shapes_test.py<|end_file_name|><|fim▁begin|># coding=utf-8
# Copyright 2022 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for trax.shapes."""
from absl.testing import absltest
import numpy as np<|fim▁hole|>
from trax import shapes
from trax.shapes import ShapeDtype
class ShapesTest(absltest.TestCase):
def test_constructor_and_read_properties(self):
sd = ShapeDtype((2, 3), np.int32)
self.assertEqual(sd.shape, (2, 3))
self.assertEqual(sd.dtype, np.int32)
def test_default_dtype_is_float32(self):
sd = ShapeDtype((2, 3))
self.assertEqual(sd.shape, (2, 3))
self.assertEqual(sd.dtype, np.float32)
def test_signature_on_ndarray(self):
array = np.array([[2, 3, 5, 7],
[11, 13, 17, 19]],
dtype=np.int16)
sd = shapes.signature(array)
self.assertEqual(sd.shape, (2, 4))
self.assertEqual(sd.dtype, np.int16)
def test_shape_dtype_repr(self):
sd = ShapeDtype((2, 3))
repr_string = '{}'.format(sd)
self.assertEqual(repr_string,
"ShapeDtype{shape:(2, 3), dtype:<class 'numpy.float32'>}")
def test_splice_signatures(self):
sd1 = ShapeDtype((1,))
sd2 = ShapeDtype((2,))
sd3 = ShapeDtype((3,))
sd4 = ShapeDtype((4,))
sd5 = ShapeDtype((5,))
# Signatures can be ShapeDtype instances, tuples of 2+ ShapeDtype instances,
# or empty tuples.
sig1 = sd1
sig2 = (sd2, sd3, sd4)
sig3 = ()
sig4 = sd5
spliced = shapes.splice_signatures(sig1, sig2, sig3, sig4)
self.assertEqual(spliced, (sd1, sd2, sd3, sd4, sd5))
def test_len_signature(self):
"""Signatures of all sizes should give correct length when asked."""
x1 = np.array([1, 2, 3])
x2 = np.array([10, 20, 30])
inputs0 = ()
inputs1 = x1 # NOT in a tuple
inputs2 = (x1, x2)
sig0 = shapes.signature(inputs0)
sig1 = shapes.signature(inputs1)
sig2 = shapes.signature(inputs2)
# pylint: disable=g-generic-assert
self.assertEqual(len(sig0), 0)
self.assertEqual(len(sig1), 1)
self.assertEqual(len(sig2), 2)
# pylint: enable=g-generic-assert
if __name__ == '__main__':
absltest.main()<|fim▁end|> | |
<|file_name|>gui_basic.py<|end_file_name|><|fim▁begin|>import pygame, sys
from pygame.locals import *
import re
import json
import imp
import copy
#chessboard = json.load(open("./common/initial_state.json"))
chessboard1 = json.load(open("./common/initial_state.json"))
chessboard2 = json.load(open("./common/initial_state.json"))
chessboard3 = json.load(open("./common/initial_state.json"))
#created 3 chessboards for now
chessboards = [chessboard1, chessboard2, chessboard3]
chessboard = chessboards[0] #current board set to the first.
image_dir = "./res/basic_chess_pieces/"
rules = imp.load_source('chess_basic_rules','./common/rules.py')
cpu = imp.load_source('chess_minimax_ai','./ai/cpu.py')<|fim▁hole|>
opposite = { "white" : "black" , "black" : "white" }
def get_chess_square(x,y,size):
return [ x/size+1,y/size+1]
def get_chess_square_reverse(a,b,size):
return ((a-1)*size/8,(b-1)*size/8)
def get_chess_square_border(r, s, size):
return((r-1)*size/8+2, (s-1)*size/8+2)
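# Illustrative mapping (Python 2 integer division): on a 600px board each
# square is 75px, so a click at pixel (160, 90) gives
# get_chess_square(160, 90, 75) == [3, 2], and
# get_chess_square_reverse(3, 2, 600) == (150, 75) is the matching
# top-left corner of that square.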
pygame.init()
screen = pygame.display.set_mode((600, 600))
def draw_chessboard( board, size,p_list = None):
SIZE = size
GRAY = (150, 150, 150)
WHITE = (255, 255, 255)
BLUE = ( 0 , 0 , 150)
screen.fill(WHITE)
    #filling gray square blocks of size/8 alternately
startX = 0
startY = 0
for e in range(0, 8):
if e%2 == 0 :
startX = 0
else:
startX = SIZE/8
for e2 in range(0, 8):
pygame.draw.rect(screen, GRAY, ((startX, startY), (SIZE/8, SIZE/8)))
startX += 2* SIZE/8
startY += SIZE/8
    #placing the corresponding images of the pieces on the blocks
for army in board.keys():
for k in board[army].keys():
img = pygame.image.load(image_dir + army + "_" + re.findall('[a-z]+',k)[0]+'.png')
screen.blit(img,( board[army][k][1]*SIZE/8 - SIZE/8+SIZE/80, board[army][k][0] * SIZE/8 - SIZE/8+SIZE/80 ))
#if any piece is selected and has some legal moves then display blue squares on corresponding valid move block
if p_list:
for p in p_list:
pygame.draw.rect(screen,BLUE,(get_chess_square_reverse(p[1],p[0],SIZE),(SIZE/8,SIZE/8)))
if (p[1]+p[0])%2!=0:
pygame.draw.rect(screen, WHITE, (get_chess_square_border(p[1], p[0], SIZE), (SIZE/8-4, SIZE/8-4)))
else:
pygame.draw.rect(screen, GRAY, (get_chess_square_border(p[1], p[0], SIZE), (SIZE/8-4, SIZE/8-4)))
            # Redraw whatever piece sits on the highlighted square; every
            # piece type is blitted the same way, so no need to branch on kind.
            for x in ['white', 'black']:
                for k in board[x].keys():
                    if board[x][k][1] == p[1] and board[x][k][0] == p[0]:
                        img = pygame.image.load(image_dir + x + "_" + re.findall('[a-z]+', k)[0] + '.png')
                        screen.blit(img, (board[x][k][1]*SIZE/8 - SIZE/8 + SIZE/80, board[x][k][0]*SIZE/8 - SIZE/8 + SIZE/80))
pygame.display.update()
def looping_cpu_vs_human(board,size):
global chessboards
global flag
SIZE = size
draw_chessboard(board,size)
cur=0
old_x=0
old_y=0
new_x=0
new_y=0
color = "white"
flag= 0
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
#checking for keyboard events
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
cur = (cur+1)%3
board = chessboards[cur]
if event.key == pygame.K_LEFT:
cur = (cur+2)%3
board = chessboards[cur]
#updating the screen with the next or prev chessboard
draw_chessboard(board,size)
if event.type == pygame.MOUSEBUTTONDOWN:
if flag == 1:
flag =0
x,y= pygame.mouse.get_pos()
new_x,new_y = get_chess_square(x,y,SIZE/8)
#print new_x,new_y
valid = False
for x in ['white','black']:
for k in board[x].keys():
if board[x][k][1] == old_x and board[x][k][0] == old_y:
if "bishop" in k:
if [new_y,new_x] in rules.legal_bishop_moves(board,x,k): valid = True
elif "pawn" in k:
if [new_y,new_x] in rules.legal_pawn_moves(board,x,k): valid = True
elif "knight" in k:
if [new_y,new_x] in rules.legal_knight_moves(board,x,k): valid = True
elif "rook" in k:
if [new_y,new_x] in rules.legal_rook_moves(board,x,k): valid = True
elif "queen" in k:
if [new_y,new_x] in rules.legal_queen_moves(board,x,k): valid = True
elif "king" in k:
if [new_y,new_x] in rules.legal_king_moves(board,x,k): valid = True
#if piece is moved to valid position then update the piece's coordinates and check if it is killing other piece
if valid and x == color:
board[x][k][1] = new_x
board[x][k][0] = new_y
killed_piece = None
for k,v in board[opposite[x]].iteritems():
if v[0] == new_y and v[1] == new_x:
killed_piece = k
if killed_piece and (killed_piece in board[opposite[x]].keys()):
del board[opposite[x]][killed_piece]
break
draw_chessboard(board,size)
#move = cpu.minimax(board,opposite[x],1) ##depth is 1
#CPU turn
move = cpu.alpha_beta_pruning(board,opposite[x],3)
#board = helper.generate_board(board,move)
#referencing the new board generated by helper first to chessboard array element
chessboards[cur] = helper.generate_board(board,move)
board = chessboards[cur]
draw_chessboard(board,size)
break #Break here is necessary since we are deleting a key from the map on which we are iterating
else:
print "here"
x,y= pygame.mouse.get_pos()
old_x,old_y = get_chess_square(x,y,SIZE/8)
p= []
for x in ['white','black']:
for k in board[x].keys():
if board[x][k][1] == old_x and board[x][k][0] == old_y: #print k
if "bishop" in k:
p= rules.legal_bishop_moves(board,x,k)
elif "pawn" in k:
p= rules.legal_pawn_moves(board,x,k)
elif "knight" in k:
p= rules.legal_knight_moves(board,x,k)
elif "rook" in k:
p= rules.legal_rook_moves(board,x,k)
elif "queen" in k:
p= rules.legal_queen_moves(board,x,k)
elif "king" in k:
p= rules.legal_king_moves( board,x,k)
draw_chessboard(board,size,p)
#print old_x,old_y
if event.type == pygame.MOUSEBUTTONUP:
print "here1"
x,y= pygame.mouse.get_pos()
new_x,new_y = get_chess_square(x,y,SIZE/8)
if new_x == old_x and new_y == old_y:
flag = 1
continue
else:
#print new_x,new_y
valid = False
for x in ['white','black']:
for k in board[x].keys():
if board[x][k][1] == old_x and board[x][k][0] == old_y:
if "bishop" in k:
if [new_y,new_x] in rules.legal_bishop_moves(board,x,k): valid = True
elif "pawn" in k:
if [new_y,new_x] in rules.legal_pawn_moves(board,x,k): valid = True
elif "knight" in k:
if [new_y,new_x] in rules.legal_knight_moves(board,x,k): valid = True
elif "rook" in k:
if [new_y,new_x] in rules.legal_rook_moves(board,x,k): valid = True
elif "queen" in k:
if [new_y,new_x] in rules.legal_queen_moves(board,x,k): valid = True
elif "king" in k:
if [new_y,new_x] in rules.legal_king_moves(board,x,k): valid = True
#if piece is moved to valid position then update the piece's coordinates and check if it is killing other piece
if valid and x == color:
board[x][k][1] = new_x
board[x][k][0] = new_y
killed_piece = None
for k,v in board[opposite[x]].iteritems():
if v[0] == new_y and v[1] == new_x:
killed_piece = k
if killed_piece and (killed_piece in board[opposite[x]].keys()):
del board[opposite[x]][killed_piece]
break
draw_chessboard(board,size) #move = cpu.minimax(board,opposite[x],1) ##depth is 1
#CPU turn
move = cpu.alpha_beta_pruning(board,opposite[x],7)
#board = helper.generate_board(board,move)
#referencing the new board generated by helper first to chessboard array element
chessboards[cur] = helper.generate_board(board,move)
board = chessboards[cur]
draw_chessboard(board,size)
break #Break here is necessary since we are deleting a key from the map on which we are iterating
def looping_cpu_vs_cpu(board,size):
global chessboards
draw_chessboard(board,size)
color = "white"
cur = 0
#print board
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
#checking for keyboard events
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
cur = (cur+1)%3
board = chessboards[cur]
if event.key == pygame.K_LEFT:
cur = (cur+2)%3
board = chessboards[cur]
#updating the screen with the next or prev chessboard
draw_chessboard(board,size)
move = cpu.alpha_beta_pruning_python_native(board,color,1) #depth is 1
#move = cpu.alpha_beta_pruning(board,color,7)
chessboards[cur] = helper.generate_board(board,move)
board = chessboards[cur]
color = opposite[color]
draw_chessboard(board,size)
def looping_human_vs_human(board, size):
global chessboards
global flag
SIZE = size
draw_chessboard(board,size)
cur=0
old_x=0
old_y=0
new_x=0
new_y=0
color = "white"
flag = 0
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
#checking for keyboard events
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
cur = (cur+1)%3
board = chessboards[cur]
if event.key == pygame.K_LEFT:
cur = (cur+2)%3
board = chessboards[cur]
#updating the screen with the next or prev chessboard
draw_chessboard(board,size)
if event.type == pygame.MOUSEBUTTONDOWN:
if flag == 1:
x,y= pygame.mouse.get_pos()
new_x,new_y = get_chess_square(x,y,SIZE/8)
#print new_x,new_y
valid = False
for x in [color]:
for k in board[x].keys():
if board[x][k][1] == old_x and board[x][k][0] == old_y:
if "bishop" in k:
if [new_y,new_x] in rules.legal_bishop_moves(board,x,k): valid = True
elif "pawn" in k:
if [new_y,new_x] in rules.legal_pawn_moves(board,x,k): valid = True
elif "knight" in k:
if [new_y,new_x] in rules.legal_knight_moves(board,x,k): valid = True
elif "rook" in k:
if [new_y,new_x] in rules.legal_rook_moves(board,x,k): valid = True
elif "queen" in k:
if [new_y,new_x] in rules.legal_queen_moves(board,x,k): valid = True
elif "king" in k:
if [new_y,new_x] in rules.legal_king_moves(board,x,k): valid = True
#if piece is moved to valid position then update the piece's coordinates and check if it is killing other piece
if valid and x == color:
board[x][k][1] = new_x
board[x][k][0] = new_y
killed_piece = None
for k,v in board[opposite[x]].iteritems():
if v[0] == new_y and v[1] == new_x:
killed_piece = k
if killed_piece and (killed_piece in board[opposite[x]].keys()): del board[opposite[x]][killed_piece]
draw_chessboard(board,size)
color = opposite[color]
break
flag = 0
else:
x,y= pygame.mouse.get_pos()
old_x,old_y = get_chess_square(x,y,SIZE/8)
p= []
for x in [color]:
for k in board[x].keys():
if board[x][k][1] == old_x and board[x][k][0] == old_y:
#print k
if "bishop" in k:
p= rules.legal_bishop_moves(board,x,k)
elif "pawn" in k:
#print "hey"
p= rules.legal_pawn_moves(board,x,k)
elif "knight" in k:
p= rules.legal_knight_moves(board,x,k)
elif "rook" in k:
p= rules.legal_rook_moves(board,x,k)
elif "queen" in k:
p= rules.legal_queen_moves(board,x,k)
elif "king" in k:
p= rules.legal_king_moves( board,x,k)
draw_chessboard(board,size,p)
#print old_x,old_y
if event.type == pygame.MOUSEBUTTONUP:
x,y= pygame.mouse.get_pos()
new_x,new_y = get_chess_square(x,y,SIZE/8)
if new_x == old_x and new_y == old_y:
flag = 1
continue
else:
#print new_x,new_y
valid = False
for x in [color]:
for k in board[x].keys():
if board[x][k][1] == old_x and board[x][k][0] == old_y:
if "bishop" in k:
if [new_y,new_x] in rules.legal_bishop_moves(board,x,k): valid = True
elif "pawn" in k:
if [new_y,new_x] in rules.legal_pawn_moves(board,x,k): valid = True
elif "knight" in k:
if [new_y,new_x] in rules.legal_knight_moves(board,x,k): valid = True
elif "rook" in k:
if [new_y,new_x] in rules.legal_rook_moves(board,x,k): valid = True
elif "queen" in k:
if [new_y,new_x] in rules.legal_queen_moves(board,x,k): valid = True
elif "king" in k:
if [new_y,new_x] in rules.legal_king_moves(board,x,k): valid = True
#if piece is moved to valid position then update the piece's coordinates and check if it is killing other piece
if valid and x == color:
board[x][k][1] = new_x
board[x][k][0] = new_y
killed_piece = None
for k,v in board[opposite[x]].iteritems():
if v[0] == new_y and v[1] == new_x:
killed_piece = k
if killed_piece and (killed_piece in board[opposite[x]].keys()): del board[opposite[x]][killed_piece]
draw_chessboard(board,size)
color = opposite[color]
break
##main loop ...
#looping_cpu_vs_human( chessboard,600)
#looping_cpu_vs_cpu( chessboard,600)<|fim▁end|> | helper = imp.load_source('helper_functions','./common/helper_functions.py') |
<|file_name|>lcg128xsl64.rs<|end_file_name|><|fim▁begin|>use rand_core::{RngCore, SeedableRng};
use rand_pcg::{Lcg128Xsl64, Pcg64};
#[test]
fn test_lcg128xsl64_advancing() {
for seed in 0..20 {
let mut rng1 = Lcg128Xsl64::seed_from_u64(seed);
let mut rng2 = rng1.clone();
for _ in 0..20 {
rng1.next_u64();
}<|fim▁hole|> assert_eq!(rng1, rng2);
}
}
#[test]
fn test_lcg128xsl64_construction() {
// Test that various construction techniques produce a working RNG.
#[rustfmt::skip]
let seed = [1,2,3,4, 5,6,7,8, 9,10,11,12, 13,14,15,16,
17,18,19,20, 21,22,23,24, 25,26,27,28, 29,30,31,32];
let mut rng1 = Lcg128Xsl64::from_seed(seed);
assert_eq!(rng1.next_u64(), 8740028313290271629);
let mut rng2 = Lcg128Xsl64::from_rng(&mut rng1).unwrap();
assert_eq!(rng2.next_u64(), 1922280315005786345);
let mut rng3 = Lcg128Xsl64::seed_from_u64(0);
assert_eq!(rng3.next_u64(), 2354861276966075475);
// This is the same as Lcg128Xsl64, so we only have a single test:
let mut rng4 = Pcg64::seed_from_u64(0);
assert_eq!(rng4.next_u64(), 2354861276966075475);
}
#[test]
fn test_lcg128xsl64_true_values() {
// Numbers copied from official test suite (C version).
let mut rng = Lcg128Xsl64::new(42, 54);
let mut results = [0u64; 6];
for i in results.iter_mut() {
*i = rng.next_u64();
}
let expected: [u64; 6] = [
0x86b1da1d72062b68,
0x1304aa46c9853d39,
0xa3670e9e0dd50358,
0xf9090e529a7dae00,
0xc85b9fd837996f2c,
0x606121f8e3919196,
];
assert_eq!(results, expected);
}
#[cfg(feature = "serde1")]
#[test]
fn test_lcg128xsl64_serde() {
use bincode;
use std::io::{BufReader, BufWriter};
let mut rng = Lcg128Xsl64::seed_from_u64(0);
let buf: Vec<u8> = Vec::new();
let mut buf = BufWriter::new(buf);
bincode::serialize_into(&mut buf, &rng).expect("Could not serialize");
let buf = buf.into_inner().unwrap();
let mut read = BufReader::new(&buf[..]);
let mut deserialized: Lcg128Xsl64 =
bincode::deserialize_from(&mut read).expect("Could not deserialize");
for _ in 0..16 {
assert_eq!(rng.next_u64(), deserialized.next_u64());
}
}<|fim▁end|> | rng2.advance(20); |
<|file_name|>djvutext.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3
"""
This bot uploads text from djvu files onto pages in the "Page" namespace.
It is intended to be used for Wikisource.
The following parameters are supported:
-index:... name of the index page (without the Index: prefix)
 -djvu:...        path to the djvu file; it shall be either:
                  - the path to a djvu file, or
                  - a dir where a djvu file named after the index is located
                  optional, defaults to the current dir '.'
-pages:<start>-<end>,...<start>-<end>,<start>-<end>
Page range to upload;
                  optional, start=1, end=number of images in the djvu file.
Page ranges can be specified as:
A-B -> pages A until B
A- -> pages A until number of images
A -> just page A
-B -> pages 1 until B
This script is a :py:obj:`ConfigParserBot <pywikibot.bot.ConfigParserBot>`.
The following options can be set within a settings file which is scripts.ini
by default:
-summary: custom edit summary.
Use quotes if edit summary contains spaces.
-force overwrites existing text
optional, default False
-always do not bother asking to confirm any of the changes.
"""
#
# (C) Pywikibot team, 2008-2022
#
# Distributed under the terms of the MIT license.
#
import os.path
from typing import Optional
import pywikibot
from pywikibot import i18n
from pywikibot.bot import SingleSiteBot
from pywikibot.exceptions import NoPageError
from pywikibot.proofreadpage import ProofreadPage
from pywikibot.tools.djvu import DjVuFile
class DjVuTextBot(SingleSiteBot):
"""
A bot that uploads text-layer from djvu files to Page:namespace.
Works only on sites with Proofread Page extension installed.
.. versionchanged:: 7.0
        DjVuTextBot is a ConfigParserBot
"""
update_options = {
'force': False,
'summary': '',
}
def __init__(
self,
djvu,
index,
pages: Optional[tuple] = None,
**kwargs
) -> None:
"""
Initializer.
:param djvu: djvu from where to fetch the text layer
:type djvu: DjVuFile object
:param index: index page in the Index: namespace
:type index: Page object
:param pages: page interval to upload (start, end)
"""
super().__init__(**kwargs)
self._djvu = djvu
self._index = index
self._prefix = self._index.title(with_ns=False)
self._page_ns = self.site._proofread_page_ns.custom_name
if not pages:
self._pages = (1, self._djvu.number_of_images())
else:
self._pages = pages
# Get edit summary message if it's empty.
if not self.opt.summary:
self.opt.summary = i18n.twtranslate(self._index.site,
'djvutext-creating')
def page_number_gen(self):
"""Generate pages numbers from specified page intervals."""
last = 0
for start, end in sorted(self._pages):
start = max(last, start)
last = end + 1
yield from range(start, last)
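    # For example, self._pages = [(1, 3), (2, 5)] yields 1, 2, 3, 4, 5:
    # overlapping intervals collapse because each interval is clamped to
    # start no earlier than the last page already generated.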
@property
def generator(self):
"""Generate pages from specified page interval."""
for page_number in self.page_number_gen():
title = '{page_ns}:{prefix}/{number}'.format(
page_ns=self._page_ns,
prefix=self._prefix,
number=page_number)
page = ProofreadPage(self._index.site, title)
page.page_number = page_number # remember page number in djvu file
yield page
def treat(self, page) -> None:
"""Process one page."""
old_text = page.text
# Overwrite body of the page with content from djvu
page.body = self._djvu.get_page(page.page_number)
new_text = page.text
if page.exists() and not self.opt.force:
pywikibot.output(
'Page {} already exists, not adding!\n'
'Use -force option to overwrite the output page.'
.format(page))
else:
self.userPut(page, old_text, new_text, summary=self.opt.summary)
def main(*args: str) -> None:
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
:param args: command line arguments
"""
index = None
djvu_path = '.' # default djvu file directory
pages = '1-'
options = {}
# Parse command line arguments.
local_args = pywikibot.handle_args(args)
for arg in local_args:
opt, _, value = arg.partition(':')
if opt == '-index':
index = value
elif opt == '-djvu':
djvu_path = value
elif opt == '-pages':
pages = value
elif opt == '-summary':
options['summary'] = value
elif opt in ('-force', '-always'):<|fim▁hole|> else:
pywikibot.output('Unknown argument ' + arg)
# index is mandatory.
if not index:
pywikibot.bot.suggest_help(missing_parameters=['-index'])
return
# If djvu_path is not a file, build djvu_path from dir+index.
djvu_path = os.path.expanduser(djvu_path)
djvu_path = os.path.abspath(djvu_path)
if not os.path.exists(djvu_path):
pywikibot.error('No such file or directory: ' + djvu_path)
return
if os.path.isdir(djvu_path):
djvu_path = os.path.join(djvu_path, index)
# Check the djvu file exists and, if so, create the DjVuFile wrapper.
djvu = DjVuFile(djvu_path)
if not djvu.has_text():
pywikibot.error('No text layer in djvu file {}'.format(djvu.file))
return
# Parse pages param.
pages = pages.split(',')
for i, page_interval in enumerate(pages):
start, sep, end = page_interval.partition('-')
start = int(start or 1)
end = int(end or djvu.number_of_images()) if sep else start
pages[i] = (start, end)
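    # e.g. '-pages:3-5,8' yields [(3, 5), (8, 8)], and '-pages:7-' yields
    # [(7, djvu.number_of_images())].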
site = pywikibot.Site()
if not site.has_extension('ProofreadPage'):
pywikibot.error('Site {} must have ProofreadPage extension.'
.format(site))
return
index_page = pywikibot.Page(site, index, ns=site.proofread_index_ns)
if not index_page.exists():
raise NoPageError(index)
pywikibot.output('uploading text from {} to {}'
.format(djvu.file, index_page.title(as_link=True)))
bot = DjVuTextBot(djvu, index_page, pages=pages, site=site, **options)
bot.run()
if __name__ == '__main__':
try:
main()
except Exception:
pywikibot.error('Fatal error:', exc_info=True)<|fim▁end|> | options[opt[1:]] = True |
<|file_name|>settings.py<|end_file_name|><|fim▁begin|>"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@w$i9&1blz%(h_kx4qsoq_2e11l#z9%=7+aseo1xdb-8^b-(b5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ckeditor',
'ckeditor_uploader',
'blog',
'userprofiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)<|fim▁hole|>MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'public/media')
CKEDITOR_JQUERY_URL = 'https://ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js'
CKEDITOR_UPLOAD_PATH = "uploads/"
CKEDITOR_UPLOAD_SLUGIFY_FILENAME = False
CKEDITOR_RESTRICT_BY_USER = True
CKEDITOR_BROWSE_SHOW_DIRS = True
CKEDITOR_IMAGE_BACKEND = "pillow"<|fim▁end|> |
STATIC_ROOT = os.path.join(BASE_DIR, "public/static")
|
<|file_name|>zcopy.js<|end_file_name|><|fim▁begin|>jQuery( document ).ready(function($) {
/*
$( ".show-button" ).each(function() {
var id=jQuery(this).attr("id");
console.log('#copy-'+id);
$('#copy-'+id).zclip({
path:Drupal.settings.zclipcopy.moviepath,
copy:function(){<|fim▁hole|> return $('#copy-'+id).text();
},
afterCopy:function(){
//alert('hello');
$( "span" ).each(function(){
$('.show-text').hide();
$('.show-code').show();
var span_id=$(this).attr("id");
$('#'+span_id).html( 'Copy code');
});
$('#'+id).html( 'Code copied..');
var afl_url = $.trim($('#url-'+id).html());
if( Drupal.settings.zclipcopy.coupon_same_page_redirection == 'on'){
setTimeout(function() {
window.location.href = afl_url;
}, 5000);
}
if( Drupal.settings.zclipcopy.coupon_new_page_redirection == 'on'){
window.open(afl_url,'_blank');
}
}
});
});
*/
$('span#node-copy-button').zclip({
path:Drupal.settings.zclipcopy.moviepath,
copy:function(){ return $.trim($('#node-coupon-code').text()); },
afterCopy:function(){
$('#node-copy-button').html( 'Code copied');
var afl_url = $.trim($('#url-'+id).html());
if( Drupal.settings.zclipcopy.coupon_same_page_redirection == 'on'){
setTimeout(function() { window.location.href = afl_url;}, Drupal.settings.zclipcopy.redirect_delay);
}
if( Drupal.settings.zclipcopy.coupon_new_page_redirection == 'on'){
window.open(afl_url,'_blank');
}
}
});
/*
$(".coup-title").on("click", function () {
var id = jQuery(this).attr("id");
var afl_url = $.trim($('#url-coupon-' + id).html());
if (Drupal.settings.zclipcopy.coupon_same_page_redirection == 'on') {
setTimeout(function () {
window.location.href = afl_url;
}, Drupal.settings.zclipcopy.redirect_delay);
}
if (Drupal.settings.zclipcopy.coupon_new_page_redirection == 'on') {
window.open(afl_url, '_blank');
}
});
*/
});<|fim▁end|> | conslode.log($('#copy-'+id).text()); |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from setuptools import setup, find_packages
setup(name='BIOMD0000000360',
version=20140916,<|fim▁hole|> packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
)<|fim▁end|> | description='BIOMD0000000360 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/BIOMD0000000360',
maintainer='Stanley Gu',
maintainer_url='[email protected]', |
<|file_name|>JSMethodInvocationHandler.java<|end_file_name|><|fim▁begin|>package sagex.phoenix.remote.services;
import java.lang.reflect.InvocationHandler;<|fim▁hole|>import java.util.HashMap;
import java.util.Map;
import javax.script.Invocable;
import javax.script.ScriptException;
import sagex.phoenix.util.PhoenixScriptEngine;
public class JSMethodInvocationHandler implements InvocationHandler {
private PhoenixScriptEngine eng;
private Map<String, String> methodMap = new HashMap<String, String>();
public JSMethodInvocationHandler(PhoenixScriptEngine eng, String interfaceMethod, String jsMethod) {
this.eng = eng;
methodMap.put(interfaceMethod, jsMethod);
}
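    // Illustrative wiring (hypothetical interface and method names): this
    // handler is intended to back a dynamic proxy, e.g.
    //   Runnable r = (Runnable) java.lang.reflect.Proxy.newProxyInstance(
    //       Runnable.class.getClassLoader(), new Class[] {Runnable.class},
    //       new JSMethodInvocationHandler(eng, "run", "jsRun"));
    //   r.run(); // dispatches to the "jsRun" function in the script engine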
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
if (Object.class == method.getDeclaringClass()) {
String name = method.getName();
if ("equals".equals(name)) {
return proxy == args[0];
} else if ("hashCode".equals(name)) {
return System.identityHashCode(proxy);
} else if ("toString".equals(name)) {
return proxy.getClass().getName() + "@" + Integer.toHexString(System.identityHashCode(proxy))
+ ", with InvocationHandler " + this;
} else {
throw new IllegalStateException(String.valueOf(method));
}
}
String jsMethod = methodMap.get(method.getName());
if (jsMethod == null) {
throw new NoSuchMethodException("No Javascript Method for " + method.getName());
}
Invocable inv = (Invocable) eng.getEngine();
try {
return inv.invokeFunction(jsMethod, args);
} catch (NoSuchMethodException e) {
throw new NoSuchMethodException("The Java Method: " + method.getName() + " maps to a Javascript Method " + jsMethod
+ " that does not exist.");
} catch (ScriptException e) {
throw e;
}
}
}<|fim▁end|> | import java.lang.reflect.Method; |
<|file_name|>zephyr_mirror_backend.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Copyright (C) 2012 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from typing import IO, Any, Dict, List, Optional, Text, Union, Set, Tuple
from types import FrameType
import sys
from six.moves import map
from six.moves import range
try:
import simplejson
except ImportError:
import json as simplejson # type: ignore
import re
import time
import subprocess
import optparse
import os
import datetime
import textwrap
import signal
import logging
import hashlib
import tempfile
import select
DEFAULT_SITE = "https://api.zulip.com"
class States(object):
Startup, ZulipToZephyr, ZephyrToZulip, ChildSending = list(range(4))
CURRENT_STATE = States.Startup
logger = None # type: logging.Logger
def to_zulip_username(zephyr_username):
# type: (str) -> str
if "@" in zephyr_username:
(user, realm) = zephyr_username.split("@")
else:
(user, realm) = (zephyr_username, "ATHENA.MIT.EDU")
if realm.upper() == "ATHENA.MIT.EDU":
# Hack to make ctl's fake username setup work :)
if user.lower() == 'golem':
user = 'ctl'
return user.lower() + "@mit.edu"
return user.lower() + "|" + realm.upper() + "@mit.edu"
def to_zephyr_username(zulip_username):
# type: (str) -> str
(user, realm) = zulip_username.split("@")
if "|" not in user:
# Hack to make ctl's fake username setup work :)
if user.lower() == 'ctl':
user = 'golem'
return user.lower() + "@ATHENA.MIT.EDU"
match_user = re.match(r'([a-zA-Z0-9_]+)\|(.+)', user)
if not match_user:
raise Exception("Could not parse Zephyr realm for cross-realm user %s" % (zulip_username,))
return match_user.group(1).lower() + "@" + match_user.group(2).upper()
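# Illustrative round trips implied by the rules above:
#   to_zulip_username("tabbott@ATHENA.MIT.EDU")    -> "tabbott@mit.edu"
#   to_zulip_username("user@EXAMPLE.COM")          -> "user|EXAMPLE.COM@mit.edu"
#   to_zephyr_username("user|EXAMPLE.COM@mit.edu") -> "user@EXAMPLE.COM"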
# Checks whether the pair of adjacent lines would have been
# linewrapped together, had they been intended to be parts of the same
# paragraph. Our check is whether if you move the first word on the
# 2nd line onto the first line, the resulting line is either (1)
# significantly shorter than the following line (which, if they were
# in the same paragraph, should have been wrapped in a way consistent
# with how the previous line was wrapped) or (2) shorter than 60
# characters (our assumed minimum linewrapping threshold for Zephyr)
# or (3) the first word of the next line is longer than this entire
# line.
def different_paragraph(line, next_line):
# type: (str, str) -> bool
words = next_line.split()
return (len(line + " " + words[0]) < len(next_line) * 0.8 or
len(line + " " + words[0]) < 50 or
len(line) < len(words[0]))
# Linewrapping algorithm based on:
# http://gcbenison.wordpress.com/2011/07/03/a-program-to-intelligently-remove-carriage-returns-so-you-can-paste-text-without-having-it-look-awful/ #ignorelongline
def unwrap_lines(body):
# type: (str) -> str
lines = body.split("\n")
result = ""
previous_line = lines[0]
for line in lines[1:]:
line = line.rstrip()
if (re.match(r'^\W', line, flags=re.UNICODE) and
re.match(r'^\W', previous_line, flags=re.UNICODE)):
result += previous_line + "\n"
elif (line == "" or
previous_line == "" or
re.match(r'^\W', line, flags=re.UNICODE) or
different_paragraph(previous_line, line)):
# Use 2 newlines to separate sections so that we<|fim▁hole|> else:
result += previous_line + " "
previous_line = line
result += previous_line
return result
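# Behavior sketch with illustrative strings: a line such as
#   "This is a fairly long line that zephyr wrapped at some column"
# followed by
#   "and this is the continuation of the very same paragraph here"
# is rejoined with a single space, while blank lines, lines starting with
# punctuation, and apparent paragraph boundaries keep their breaks.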
def send_zulip(zeph):
# type: (Dict[str, str]) -> Dict[str, str]
message = {}
if options.forward_class_messages:
message["forged"] = "yes"
message['type'] = zeph['type']
message['time'] = zeph['time']
message['sender'] = to_zulip_username(zeph['sender'])
if "subject" in zeph:
# Truncate the subject to the current limit in Zulip. No
# need to do this for stream names, since we're only
# subscribed to valid stream names.
message["subject"] = zeph["subject"][:60]
if zeph['type'] == 'stream':
# Forward messages sent to -c foo -i bar to stream bar subject "instance"
if zeph["stream"] == "message":
message['to'] = zeph['subject'].lower()
message['subject'] = "instance %s" % (zeph['subject'],)
elif zeph["stream"] == "tabbott-test5":
message['to'] = zeph['subject'].lower()
message['subject'] = "test instance %s" % (zeph['subject'],)
else:
message["to"] = zeph["stream"]
else:
message["to"] = zeph["recipient"]
message['content'] = unwrap_lines(zeph['content'])
if options.test_mode and options.site == DEFAULT_SITE:
logger.debug("Message is: %s" % (str(message),))
return {'result': "success"}
return zulip_client.send_message(message)
def send_error_zulip(error_msg):
# type: (str) -> None
message = {"type": "private",
"sender": zulip_account_email,
"to": zulip_account_email,
"content": error_msg,
}
zulip_client.send_message(message)
current_zephyr_subs = set()
def zephyr_bulk_subscribe(subs):
# type: (List[Tuple[str, str, str]]) -> None
try:
zephyr._z.subAll(subs)
except IOError:
# Since we haven't added the subscription to
# current_zephyr_subs yet, we can just return (so that we'll
# continue processing normal messages) and we'll end up
# retrying the next time the bot checks its subscriptions are
# up to date.
logger.exception("Error subscribing to streams (will retry automatically):")
logger.warning("Streams were: %s" % ([cls for cls, instance, recipient in subs],))
return
try:
actual_zephyr_subs = [cls for (cls, _, _) in zephyr._z.getSubscriptions()]
except IOError:
logger.exception("Error getting current Zephyr subscriptions")
# Don't add anything to current_zephyr_subs so that we'll
# retry the next time we check for streams to subscribe to
# (within 15 seconds).
return
for (cls, instance, recipient) in subs:
if cls not in actual_zephyr_subs:
logger.error("Zephyr failed to subscribe us to %s; will retry" % (cls,))
try:
# We'll retry automatically when we next check for
# streams to subscribe to (within 15 seconds), but
# it's worth doing 1 retry immediately to avoid
# missing 15 seconds of messages on the affected
# classes
zephyr._z.sub(cls, instance, recipient)
except IOError:
pass
else:
current_zephyr_subs.add(cls)
def update_subscriptions():
# type: () -> None
try:
f = open(options.stream_file_path, "r")
public_streams = simplejson.loads(f.read())
f.close()
except Exception:
logger.exception("Error reading public streams:")
return
classes_to_subscribe = set()
for stream in public_streams:
zephyr_class = stream.encode("utf-8")
if (options.shard is not None and
not hashlib.sha1(zephyr_class).hexdigest().startswith(options.shard)):
# This stream is being handled by a different zephyr_mirror job.
continue
if zephyr_class in current_zephyr_subs:
continue
classes_to_subscribe.add((zephyr_class, "*", "*"))
if len(classes_to_subscribe) > 0:
zephyr_bulk_subscribe(list(classes_to_subscribe))
def maybe_kill_child():
# type: () -> None
try:
if child_pid is not None:
os.kill(child_pid, signal.SIGTERM)
except OSError:
# We don't care if the child process no longer exists, so just log the error
logger.exception("")
def maybe_restart_mirroring_script():
# type: () -> None
if os.stat(os.path.join(options.root_path, "stamps", "restart_stamp")).st_mtime > start_time or \
((options.user == "tabbott" or options.user == "tabbott/extra") and
os.stat(os.path.join(options.root_path, "stamps", "tabbott_stamp")).st_mtime > start_time):
logger.warning("")
logger.warning("zephyr mirroring script has been updated; restarting...")
maybe_kill_child()
try:
zephyr._z.cancelSubs()
except IOError:
# We don't care whether we failed to cancel subs properly, but we should log it
logger.exception("")
while True:
try:
os.execvp(os.path.join(options.root_path, "user_root", "zephyr_mirror_backend.py"), sys.argv)
except Exception:
logger.exception("Error restarting mirroring script; trying again... Traceback:")
time.sleep(1)
def process_loop(log):
    # type: (Optional[IO]) -> None
restart_check_count = 0
last_check_time = time.time()
while True:
select.select([zephyr._z.getFD()], [], [], 15)
try:
# Fetch notices from the queue until its empty
while True:
notice = zephyr.receive(block=False)
if notice is None:
break
try:
process_notice(notice, log)
except Exception:
logger.exception("Error relaying zephyr:")
time.sleep(2)
except Exception:
logger.exception("Error checking for new zephyrs:")
time.sleep(1)
continue
if time.time() - last_check_time > 15:
last_check_time = time.time()
try:
maybe_restart_mirroring_script()
if restart_check_count > 0:
logger.info("Stopped getting errors checking whether restart is required.")
restart_check_count = 0
except Exception:
if restart_check_count < 5:
logger.exception("Error checking whether restart is required:")
restart_check_count += 1
if options.forward_class_messages:
try:
update_subscriptions()
except Exception:
logger.exception("Error updating subscriptions from Zulip:")
def parse_zephyr_body(zephyr_data):
# type: (str) -> Tuple[str, str]
try:
(zsig, body) = zephyr_data.split("\x00", 1)
except ValueError:
(zsig, body) = ("", zephyr_data)
return (zsig, body)
def parse_crypt_table(zephyr_class, instance):
    # type: (Text, str) -> Optional[str]
try:
crypt_table = open(os.path.join(os.environ["HOME"], ".crypt-table"))
except IOError:
return None
for line in crypt_table.readlines():
if line.strip() == "":
# Ignore blank lines
continue
match = re.match("^crypt-(?P<class>[^:]+):\s+((?P<algorithm>(AES|DES)):\s+)?(?P<keypath>\S+)$", line)
if match is None:
# Malformed crypt_table line
logger.debug("Invalid crypt_table line!")
continue
groups = match.groupdict()
if groups['class'].lower() == zephyr_class and 'keypath' in groups and \
groups.get("algorithm") == "AES":
return groups["keypath"]
return None
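# A crypt-table entry this parser accepts looks like (hypothetical key path):
#   crypt-white-magic: AES: /mit/someuser/.crypt/white-magic
# Lines without an "AES:" algorithm field (including DES entries) are ignored.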
def decrypt_zephyr(zephyr_class, instance, body):
# type: (Text, str, str) -> str
keypath = parse_crypt_table(zephyr_class, instance)
if keypath is None:
# We can't decrypt it, so we just return the original body
return body
# Enable handling SIGCHLD briefly while we call into
# subprocess to avoid http://bugs.python.org/issue9127
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
# decrypt the message!
p = subprocess.Popen(["gpg",
"--decrypt",
"--no-options",
"--no-default-keyring",
"--keyring=/dev/null",
"--secret-keyring=/dev/null",
"--batch",
"--quiet",
"--no-use-agent",
"--passphrase-file",
keypath],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
decrypted, _ = p.communicate(input=body)
# Restore our ignoring signals
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
return decrypted
def process_notice(notice, log):
    # type: (Any, IO) -> None
(zsig, body) = parse_zephyr_body(notice.message)
is_personal = False
is_huddle = False
if notice.opcode == "PING":
# skip PING messages
return
zephyr_class = notice.cls.lower()
if zephyr_class == options.nagios_class:
# Mark that we got the message and proceed
with open(options.nagios_path, "w") as f:
f.write("0\n")
return
if notice.recipient != "":
is_personal = True
# Drop messages not to the listed subscriptions
if is_personal and not options.forward_personals:
return
if (zephyr_class not in current_zephyr_subs) and not is_personal:
logger.debug("Skipping ... %s/%s/%s" %
(zephyr_class, notice.instance, is_personal))
return
if notice.format.startswith("Zephyr error: See") or notice.format.endswith("@(@color(blue))"):
logger.debug("Skipping message we got from Zulip!")
return
if (zephyr_class == "mail" and notice.instance.lower() == "inbox" and is_personal and
not options.forward_mail_zephyrs):
# Only forward mail zephyrs if forwarding them is enabled.
return
if is_personal:
if body.startswith("CC:"):
is_huddle = True
# Map "CC: user1 user2" => "[email protected], [email protected]"
huddle_recipients = [to_zulip_username(x.strip()) for x in
body.split("\n")[0][4:].split()]
if notice.sender not in huddle_recipients:
huddle_recipients.append(to_zulip_username(notice.sender))
body = body.split("\n", 1)[1]
if options.forward_class_messages and notice.opcode.lower() == "crypt":
body = decrypt_zephyr(zephyr_class, notice.instance.lower(), body)
zeph = {'time': str(notice.time),
'sender': notice.sender,
'zsig': zsig, # logged here but not used by app
'content': body}
if is_huddle:
zeph['type'] = 'private'
zeph['recipient'] = huddle_recipients
elif is_personal:
zeph['type'] = 'private'
zeph['recipient'] = to_zulip_username(notice.recipient)
else:
zeph['type'] = 'stream'
zeph['stream'] = zephyr_class
if notice.instance.strip() != "":
zeph['subject'] = notice.instance
else:
zeph["subject"] = '(instance "%s")' % (notice.instance,)
# Add instances in for instanced personals
if is_personal:
if notice.cls.lower() != "message" and notice.instance.lower != "personal":
heading = "[-c %s -i %s]\n" % (notice.cls, notice.instance)
elif notice.cls.lower() != "message":
heading = "[-c %s]\n" % (notice.cls,)
elif notice.instance.lower() != "personal":
heading = "[-i %s]\n" % (notice.instance,)
else:
heading = ""
zeph["content"] = heading + zeph["content"]
zeph = decode_unicode_byte_strings(zeph)
logger.info("Received a message on %s/%s from %s..." %
(zephyr_class, notice.instance, notice.sender))
if log is not None:
log.write(simplejson.dumps(zeph) + '\n')
log.flush()
if os.fork() == 0:
global CURRENT_STATE
CURRENT_STATE = States.ChildSending
# Actually send the message in a child process, to avoid blocking.
try:
res = send_zulip(zeph)
if res.get("result") != "success":
logger.error("Error relaying zephyr:\n%s\n%s" % (zeph, res))
except Exception:
logger.exception("Error relaying zephyr:")
finally:
os._exit(0)
def decode_unicode_byte_strings(zeph):
# type: (Dict[str, Any]) -> Dict[str, str]
# 'Any' can be of any type of text that is converted to str.
for field in zeph.keys():
if isinstance(zeph[field], str):
try:
decoded = zeph[field].decode("utf-8")
except Exception:
decoded = zeph[field].decode("iso-8859-1")
zeph[field] = decoded
return zeph
def quit_failed_initialization(message):
# type: (str) -> str
logger.error(message)
maybe_kill_child()
sys.exit(1)
def zephyr_init_autoretry():
# type: () -> None
backoff = zulip.RandomExponentialBackoff()
while backoff.keep_going():
try:
# zephyr.init() tries to clear old subscriptions, and thus
# sometimes gets a SERVNAK from the server
zephyr.init()
backoff.succeed()
return
except IOError:
logger.exception("Error initializing Zephyr library (retrying). Traceback:")
backoff.fail()
quit_failed_initialization("Could not initialize Zephyr library, quitting!")
def zephyr_load_session_autoretry(session_path):
# type: (str) -> None
backoff = zulip.RandomExponentialBackoff()
while backoff.keep_going():
try:
session = open(session_path, "r").read()
zephyr._z.initialize()
zephyr._z.load_session(session)
zephyr.__inited = True
return
except IOError:
logger.exception("Error loading saved Zephyr session (retrying). Traceback:")
backoff.fail()
quit_failed_initialization("Could not load saved Zephyr session, quitting!")
def zephyr_subscribe_autoretry(sub):
# type: (Tuple[str, str, str]) -> None
backoff = zulip.RandomExponentialBackoff()
while backoff.keep_going():
try:
zephyr.Subscriptions().add(sub)
backoff.succeed()
return
except IOError:
# Probably a SERVNAK from the zephyr server, but log the
# traceback just in case it's something else
logger.exception("Error subscribing to personals (retrying). Traceback:")
backoff.fail()
quit_failed_initialization("Could not subscribe to personals, quitting!")
def zephyr_to_zulip(options):
# type: (Any) -> None
if options.use_sessions and os.path.exists(options.session_path):
logger.info("Loading old session")
zephyr_load_session_autoretry(options.session_path)
else:
zephyr_init_autoretry()
if options.forward_class_messages:
update_subscriptions()
if options.forward_personals:
# Subscribe to personals; we really can't operate without
# those subscriptions, so just retry until it works.
zephyr_subscribe_autoretry(("message", "*", "%me%"))
zephyr_subscribe_autoretry(("mail", "inbox", "%me%"))
if options.nagios_class:
zephyr_subscribe_autoretry((options.nagios_class, "*", "*"))
if options.use_sessions:
open(options.session_path, "w").write(zephyr._z.dump_session())
if options.logs_to_resend is not None:
with open(options.logs_to_resend, 'r') as log:
for ln in log:
try:
zeph = simplejson.loads(ln)
# New messages added to the log shouldn't have any
# elements of type str (they should already all be
# unicode), but older messages in the log are
# still of type str, so convert them before we
# send the message
zeph = decode_unicode_byte_strings(zeph)
# Handle importing older zephyrs in the logs
# where it isn't called a "stream" yet
if "class" in zeph:
zeph["stream"] = zeph["class"]
if "instance" in zeph:
zeph["subject"] = zeph["instance"]
logger.info("sending saved message to %s from %s..." %
(zeph.get('stream', zeph.get('recipient')),
zeph['sender']))
send_zulip(zeph)
except Exception:
logger.exception("Could not send saved zephyr:")
time.sleep(2)
logger.info("Successfully initialized; Starting receive loop.")
if options.resend_log_path is not None:
with open(options.resend_log_path, 'a') as log:
process_loop(log)
else:
process_loop(None)
def send_zephyr(zwrite_args, content):
    # type: (List[str], str) -> Tuple[int, str]
p = subprocess.Popen(zwrite_args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate(input=content.encode("utf-8"))
if p.returncode:
logger.error("zwrite command '%s' failed with return code %d:" % (
" ".join(zwrite_args), p.returncode,))
if stdout:
logger.info("stdout: " + stdout)
elif stderr:
logger.warning("zwrite command '%s' printed the following warning:" % (
" ".join(zwrite_args),))
if stderr:
logger.warning("stderr: " + stderr)
return (p.returncode, stderr)
def send_authed_zephyr(zwrite_args, content):
    # type: (List[str], str) -> Tuple[int, str]
return send_zephyr(zwrite_args, content)
def send_unauthed_zephyr(zwrite_args, content):
    # type: (List[str], str) -> Tuple[int, str]
return send_zephyr(zwrite_args + ["-d"], content)
def zcrypt_encrypt_content(zephyr_class, instance, content):
    # type: (str, str, str) -> Optional[str]
keypath = parse_crypt_table(zephyr_class, instance)
if keypath is None:
return None
# encrypt the message!
p = subprocess.Popen(["gpg",
"--symmetric",
"--no-options",
"--no-default-keyring",
"--keyring=/dev/null",
"--secret-keyring=/dev/null",
"--batch",
"--quiet",
"--no-use-agent",
"--armor",
"--cipher-algo", "AES",
"--passphrase-file",
keypath],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
encrypted, _ = p.communicate(input=content)
return encrypted
def forward_to_zephyr(message):
# type: (Dict[str, Any]) -> None
    # Values in the message dict may be of any text type ('Any').
support_heading = "Hi there! This is an automated message from Zulip."
support_closing = """If you have any questions, please be in touch through the \
Feedback button or at [email protected]."""
wrapper = textwrap.TextWrapper(break_long_words=False, break_on_hyphens=False)
wrapped_content = "\n".join("\n".join(wrapper.wrap(line))
for line in message["content"].replace("@", "@@").split("\n"))
zwrite_args = ["zwrite", "-n", "-s", message["sender_full_name"],
"-F", "Zephyr error: See http://zephyr.1ts.org/wiki/df",
"-x", "UTF-8"]
# Hack to make ctl's fake username setup work :)
if message['type'] == "stream" and zulip_account_email == "[email protected]":
zwrite_args.extend(["-S", "ctl"])
if message['type'] == "stream":
zephyr_class = message["display_recipient"]
instance = message["subject"]
match_whitespace_instance = re.match(r'^\(instance "(\s*)"\)$', instance)
if match_whitespace_instance:
# Forward messages sent to '(instance "WHITESPACE")' back to the
# appropriate WHITESPACE instance for bidirectional mirroring
instance = match_whitespace_instance.group(1)
elif (instance == "instance %s" % (zephyr_class,) or
instance == "test instance %s" % (zephyr_class,)):
# Forward messages to e.g. -c -i white-magic back from the
# place we forward them to
if instance.startswith("test"):
instance = zephyr_class
zephyr_class = "tabbott-test5"
else:
instance = zephyr_class
zephyr_class = "message"
zwrite_args.extend(["-c", zephyr_class, "-i", instance])
logger.info("Forwarding message to class %s, instance %s" % (zephyr_class, instance))
elif message['type'] == "private":
if len(message['display_recipient']) == 1:
recipient = to_zephyr_username(message["display_recipient"][0]["email"])
recipients = [recipient]
elif len(message['display_recipient']) == 2:
recipient = ""
for r in message["display_recipient"]:
if r["email"].lower() != zulip_account_email.lower():
recipient = to_zephyr_username(r["email"])
break
recipients = [recipient]
else:
zwrite_args.extend(["-C"])
# We drop the @ATHENA.MIT.EDU here because otherwise the
# "CC: user1 user2 ..." output will be unnecessarily verbose.
recipients = [to_zephyr_username(user["email"]).replace("@ATHENA.MIT.EDU", "")
for user in message["display_recipient"]]
logger.info("Forwarding message to %s" % (recipients,))
zwrite_args.extend(recipients)
if message.get("invite_only_stream"):
result = zcrypt_encrypt_content(zephyr_class, instance, wrapped_content)
if result is None:
send_error_zulip("""%s
Your Zulip-Zephyr mirror bot was unable to forward that last message \
from Zulip to Zephyr because you were sending to a zcrypted Zephyr \
class and your mirroring bot does not have access to the relevant \
key (perhaps because your AFS tokens expired). That means that while \
Zulip users (like you) received it, Zephyr users did not.
%s""" % (support_heading, support_closing))
return
# Proceed with sending a zcrypted message
wrapped_content = result
zwrite_args.extend(["-O", "crypt"])
if options.test_mode:
logger.debug("Would have forwarded: %s\n%s" %
(zwrite_args, wrapped_content.encode("utf-8")))
return
(code, stderr) = send_authed_zephyr(zwrite_args, wrapped_content)
if code == 0 and stderr == "":
return
elif code == 0:
send_error_zulip("""%s
Your last message was successfully mirrored to zephyr, but zwrite \
returned the following warning:
%s
%s""" % (support_heading, stderr, support_closing))
return
elif code != 0 and (stderr.startswith("zwrite: Ticket expired while sending notice to ") or
stderr.startswith("zwrite: No credentials cache found while sending notice to ")):
# Retry sending the message unauthenticated; if that works,
# just notify the user that they need to renew their tickets
(code, stderr) = send_unauthed_zephyr(zwrite_args, wrapped_content)
if code == 0:
if options.ignore_expired_tickets:
return
send_error_zulip("""%s
Your last message was forwarded from Zulip to Zephyr unauthenticated, \
because your Kerberos tickets have expired. It was sent successfully, \
but please renew your Kerberos tickets in the screen session where you \
are running the Zulip-Zephyr mirroring bot, so we can send \
authenticated Zephyr messages for you again.
%s""" % (support_heading, support_closing))
return
# zwrite failed and it wasn't because of expired tickets: This is
# probably because the recipient isn't subscribed to personals,
# but regardless, we should just notify the user.
send_error_zulip("""%s
Your Zulip-Zephyr mirror bot was unable to forward that last message \
from Zulip to Zephyr. That means that while Zulip users (like you) \
received it, Zephyr users did not. The error message from zwrite was:
%s
%s""" % (support_heading, stderr, support_closing))
return
def maybe_forward_to_zephyr(message):
# type: (Dict[str, Any]) -> None
    # Values in the message dict may be of any text type ('Any').
if (message["sender_email"] == zulip_account_email):
if not ((message["type"] == "stream") or
(message["type"] == "private" and
False not in [u["email"].lower().endswith("mit.edu") for u in
message["display_recipient"]])):
# Don't try forward private messages with non-MIT users
# to MIT Zephyr.
return
timestamp_now = int(time.time())
if float(message["timestamp"]) < timestamp_now - 15:
logger.warning("Skipping out of order message: %s < %s" %
(message["timestamp"], timestamp_now))
return
try:
forward_to_zephyr(message)
except Exception:
# Don't let an exception forwarding one message crash the
# whole process
logger.exception("Error forwarding message:")
def zulip_to_zephyr(options):
    # type: (Any) -> None
# Sync messages from zulip to zephyr
logger.info("Starting syncing messages.")
while True:
try:
zulip_client.call_on_each_message(maybe_forward_to_zephyr)
except Exception:
logger.exception("Error syncing messages:")
time.sleep(1)
def subscribed_to_mail_messages():
# type: () -> bool
    # In case we have lost our AFS tokens and thus won't be able to
    # parse the Zephyr subs file, first try reading the result of this
    # query from the environment so we can avoid the filesystem read.
stored_result = os.environ.get("HUMBUG_FORWARD_MAIL_ZEPHYRS")
if stored_result is not None:
return stored_result == "True"
for (cls, instance, recipient) in parse_zephyr_subs(verbose=False):
if (cls.lower() == "mail" and instance.lower() == "inbox"):
os.environ["HUMBUG_FORWARD_MAIL_ZEPHYRS"] = "True"
return True
os.environ["HUMBUG_FORWARD_MAIL_ZEPHYRS"] = "False"
return False
def add_zulip_subscriptions(verbose):
# type: (bool) -> None
zephyr_subscriptions = set()
skipped = set()
for (cls, instance, recipient) in parse_zephyr_subs(verbose=verbose):
if cls.lower() == "message":
if recipient != "*":
# We already have a (message, *, you) subscription, so
# these are redundant
continue
# We don't support subscribing to (message, *)
if instance == "*":
if recipient == "*":
skipped.add((cls, instance, recipient, "subscribing to all of class message is not supported."))
continue
# If you're on -i white-magic on zephyr, get on stream white-magic on zulip
# instead of subscribing to stream "message" on zulip
zephyr_subscriptions.add(instance)
continue
elif cls.lower() == "mail" and instance.lower() == "inbox":
# We forward mail zephyrs, so no need to log a warning.
continue
elif len(cls) > 60:
skipped.add((cls, instance, recipient, "Class longer than 60 characters"))
continue
elif instance != "*":
skipped.add((cls, instance, recipient, "Unsupported non-* instance"))
continue
elif recipient != "*":
skipped.add((cls, instance, recipient, "Unsupported non-* recipient."))
continue
zephyr_subscriptions.add(cls)
if len(zephyr_subscriptions) != 0:
res = zulip_client.add_subscriptions(list({"name": stream} for stream in zephyr_subscriptions),
authorization_errors_fatal=False)
if res.get("result") != "success":
logger.error("Error subscribing to streams:\n%s" % (res["msg"],))
return
already = res.get("already_subscribed")
new = res.get("subscribed")
unauthorized = res.get("unauthorized")
if verbose:
if already is not None and len(already) > 0:
logger.info("\nAlready subscribed to: %s" % (", ".join(list(already.values())[0]),))
if new is not None and len(new) > 0:
logger.info("\nSuccessfully subscribed to: %s" % (", ".join(list(new.values())[0]),))
if unauthorized is not None and len(unauthorized) > 0:
logger.info("\n" + "\n".join(textwrap.wrap("""\
    You have NOT been subscribed to the following streams,
because they have been configured in Zulip as invitation-only streams.
This was done at the request of users of these Zephyr classes, usually
because traffic to those streams is sent within the Zephyr world encrypted
via zcrypt (in Zulip, we achieve the same privacy goals through invitation-only streams).
If you wish to read these streams in Zulip, you need to contact the people who are
on these streams and already use Zulip. They can subscribe you to them via the
"streams" page in the Zulip web interface:
""")) + "\n\n %s" % (", ".join(unauthorized),))
if len(skipped) > 0:
if verbose:
logger.info("\n" + "\n".join(textwrap.wrap("""\
You have some lines in ~/.zephyr.subs that could not be
synced to your Zulip subscriptions because they do not
use "*" as both the instance and recipient and not one of
the special cases (e.g. personals and mail zephyrs) that
Zulip has a mechanism for forwarding. Zulip does not
allow subscribing to only some subjects on a Zulip
stream, so this tool has not created a corresponding
Zulip subscription to these lines in ~/.zephyr.subs:
""")) + "\n")
for (cls, instance, recipient, reason) in skipped:
if verbose:
if reason != "":
logger.info(" [%s,%s,%s] (%s)" % (cls, instance, recipient, reason))
else:
logger.info(" [%s,%s,%s]" % (cls, instance, recipient))
if len(skipped) > 0:
if verbose:
logger.info("\n" + "\n".join(textwrap.wrap("""\
If you wish to be subscribed to any Zulip streams related
    to these ~/.zephyr.subs lines, please do so via the Zulip
web interface.
""")) + "\n")
def valid_stream_name(name):
# type: (str) -> bool
return name != ""
def parse_zephyr_subs(verbose=False):
    # type: (bool) -> Union[List, Set[Tuple[str, str, str]]]
zephyr_subscriptions = set()
subs_file = os.path.join(os.environ["HOME"], ".zephyr.subs")
if not os.path.exists(subs_file):
if verbose:
logger.error("Couldn't find ~/.zephyr.subs!")
return []
for line in open(subs_file, "r").readlines():
line = line.strip()
if len(line) == 0:
continue
try:
(cls, instance, recipient) = line.split(",")
cls = cls.replace("%me%", options.user)
instance = instance.replace("%me%", options.user)
recipient = recipient.replace("%me%", options.user)
if not valid_stream_name(cls):
if verbose:
logger.error("Skipping subscription to unsupported class name: [%s]" % (line,))
continue
except Exception:
if verbose:
logger.error("Couldn't parse ~/.zephyr.subs line: [%s]" % (line,))
continue
zephyr_subscriptions.add((cls.strip(), instance.strip(), recipient.strip()))
return zephyr_subscriptions
def open_logger():
# type: () -> logging.Logger
if options.log_path is not None:
log_file = options.log_path
elif options.forward_class_messages:
if options.test_mode:
log_file = "/var/log/zulip/test-mirror-log"
else:
log_file = "/var/log/zulip/mirror-log"
else:
f = tempfile.NamedTemporaryFile(prefix="zulip-log.%s." % (options.user,),
delete=False)
log_file = f.name
# Close the file descriptor, since the logging system will
# reopen it anyway.
f.close()
logger = logging.getLogger(__name__)
log_format = "%(asctime)s <initial>: %(message)s"
formatter = logging.Formatter(log_format)
logging.basicConfig(format=log_format)
logger.setLevel(logging.DEBUG)
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
def configure_logger(logger, direction_name):
# type: (logging.Logger, str) -> None
if direction_name is None:
log_format = "%(message)s"
else:
log_format = "%(asctime)s [" + direction_name + "] %(message)s"
formatter = logging.Formatter(log_format)
# Replace the formatters for the file and stdout loggers
for handler in logger.handlers:
handler.setFormatter(formatter)
root_logger = logging.getLogger()
for handler in root_logger.handlers:
handler.setFormatter(formatter)
def parse_args():
# type: () -> Tuple
parser = optparse.OptionParser()
parser.add_option('--forward-class-messages',
default=False,
help=optparse.SUPPRESS_HELP,
action='store_true')
parser.add_option('--shard',
help=optparse.SUPPRESS_HELP)
parser.add_option('--noshard',
default=False,
help=optparse.SUPPRESS_HELP,
action='store_true')
parser.add_option('--resend-log',
dest='logs_to_resend',
help=optparse.SUPPRESS_HELP)
parser.add_option('--enable-resend-log',
dest='resend_log_path',
help=optparse.SUPPRESS_HELP)
parser.add_option('--log-path',
dest='log_path',
help=optparse.SUPPRESS_HELP)
parser.add_option('--stream-file-path',
dest='stream_file_path',
default="/home/zulip/public_streams",
help=optparse.SUPPRESS_HELP)
parser.add_option('--no-forward-personals',
dest='forward_personals',
help=optparse.SUPPRESS_HELP,
default=True,
action='store_false')
parser.add_option('--forward-mail-zephyrs',
dest='forward_mail_zephyrs',
help=optparse.SUPPRESS_HELP,
                      default=None,
action='store_true')
parser.add_option('--no-forward-from-zulip',
default=True,
dest='forward_from_zulip',
help=optparse.SUPPRESS_HELP,
action='store_false')
parser.add_option('--verbose',
default=False,
help=optparse.SUPPRESS_HELP,
action='store_true')
parser.add_option('--sync-subscriptions',
default=False,
action='store_true')
parser.add_option('--ignore-expired-tickets',
default=False,
action='store_true')
parser.add_option('--site',
default=DEFAULT_SITE,
help=optparse.SUPPRESS_HELP)
parser.add_option('--on-startup-command',
default=None,
help=optparse.SUPPRESS_HELP)
parser.add_option('--user',
default=os.environ["USER"],
help=optparse.SUPPRESS_HELP)
parser.add_option('--root-path',
default="/afs/athena.mit.edu/user/t/a/tabbott/for_friends",
help=optparse.SUPPRESS_HELP)
parser.add_option('--session-path',
default=None,
help=optparse.SUPPRESS_HELP)
parser.add_option('--nagios-class',
default=None,
help=optparse.SUPPRESS_HELP)
parser.add_option('--nagios-path',
default=None,
help=optparse.SUPPRESS_HELP)
parser.add_option('--use-sessions',
default=False,
action='store_true',
help=optparse.SUPPRESS_HELP)
parser.add_option('--test-mode',
default=False,
help=optparse.SUPPRESS_HELP,
action='store_true')
parser.add_option('--api-key-file',
default=os.path.join(os.environ["HOME"], "Private", ".humbug-api-key"))
return parser.parse_args()
def die_gracefully(signal, frame):
# type: (int, FrameType) -> None
if CURRENT_STATE == States.ZulipToZephyr or CURRENT_STATE == States.ChildSending:
# this is a child process, so we want os._exit (no clean-up necessary)
os._exit(1)
if CURRENT_STATE == States.ZephyrToZulip and not options.use_sessions:
try:
# zephyr=>zulip processes may have added subs, so run cancelSubs
zephyr._z.cancelSubs()
except IOError:
# We don't care whether we failed to cancel subs properly, but we should log it
logger.exception("")
sys.exit(1)
if __name__ == "__main__":
# Set the SIGCHLD handler back to SIG_DFL to prevent these errors
# when importing the "requests" module after being restarted using
# the restart_stamp functionality:
#
# close failed in file object destructor:
# IOError: [Errno 10] No child processes
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
signal.signal(signal.SIGINT, die_gracefully)
# The properties available on 'options' are dynamically
# determined, so we have to treat it as an Any for type
# annotations.
(options, args) = parse_args() # type: Any, List[str]
logger = open_logger()
configure_logger(logger, "parent")
# The 'api' directory needs to go first, so that 'import zulip' won't pick
    # up some other directory named 'zulip'.
pyzephyr_lib_path = "python-zephyr/build/lib.linux-%s-%s/" % (os.uname()[4], sys.version[0:3])
sys.path[:0] = [os.path.join(options.root_path, 'api'),
options.root_path,
os.path.join(options.root_path, "python-zephyr"),
os.path.join(options.root_path, pyzephyr_lib_path)]
# In case this is an automated restart of the mirroring script,
# and we have lost AFS tokens, first try reading the API key from
# the environment so that we can skip doing a filesystem read.
if os.environ.get("HUMBUG_API_KEY") is not None:
api_key = os.environ.get("HUMBUG_API_KEY")
else:
if not os.path.exists(options.api_key_file):
logger.error("\n" + "\n".join(textwrap.wrap("""\
Could not find API key file.
You need to either place your api key file at %s,
or specify the --api-key-file option.""" % (options.api_key_file,))))
sys.exit(1)
api_key = open(options.api_key_file).read().strip()
# Store the API key in the environment so that our children
# don't need to read it in
os.environ["HUMBUG_API_KEY"] = api_key
if options.nagios_path is None and options.nagios_class is not None:
logger.error("\n" + "nagios_path is required with nagios_class\n")
sys.exit(1)
zulip_account_email = options.user + "@mit.edu"
import zulip
zulip_client = zulip.Client(
email=zulip_account_email,
api_key=api_key,
verbose=True,
client="zephyr_mirror",
site=options.site)
start_time = time.time()
if options.sync_subscriptions:
configure_logger(logger, None) # make the output cleaner
logger.info("Syncing your ~/.zephyr.subs to your Zulip Subscriptions!")
add_zulip_subscriptions(True)
sys.exit(0)
# Kill all zephyr_mirror processes other than this one and its parent.
if not options.test_mode:
pgrep_query = "python.*zephyr_mirror"
if options.shard is not None:
# sharded class mirror
pgrep_query = "%s.*--shard=%s" % (pgrep_query, options.shard)
elif options.user is not None:
# Personals mirror on behalf of another user.
pgrep_query = "%s.*--user=%s" % (pgrep_query, options.user)
proc = subprocess.Popen(['pgrep', '-U', os.environ["USER"], "-f", pgrep_query],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, _err_unused = proc.communicate()
for pid in map(int, out.split()):
if pid == os.getpid() or pid == os.getppid():
continue
# Another copy of zephyr_mirror.py! Kill it.
logger.info("Killing duplicate zephyr_mirror process %s" % (pid,))
try:
os.kill(pid, signal.SIGINT)
except OSError:
# We don't care if the target process no longer exists, so just log the error
logger.exception("")
if options.shard is not None and set(options.shard) != set("a"):
# The shard that is all "a"s is the one that handles personals
# forwarding and zulip => zephyr forwarding
options.forward_personals = False
options.forward_from_zulip = False
if options.forward_mail_zephyrs is None:
options.forward_mail_zephyrs = subscribed_to_mail_messages()
if options.session_path is None:
options.session_path = "/var/tmp/%s" % (options.user,)
if options.forward_from_zulip:
child_pid = os.fork() # type: int
if child_pid == 0:
CURRENT_STATE = States.ZulipToZephyr
# Run the zulip => zephyr mirror in the child
configure_logger(logger, "zulip=>zephyr")
zulip_to_zephyr(options)
sys.exit(0)
else:
child_pid = None
CURRENT_STATE = States.ZephyrToZulip
import zephyr
logger_name = "zephyr=>zulip"
if options.shard is not None:
logger_name += "(%s)" % (options.shard,)
configure_logger(logger, logger_name)
# Have the kernel reap children for when we fork off processes to send Zulips
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
zephyr_to_zulip(options)<|fim▁end|> | # trigger proper Markdown processing on things like
# bulleted lists
result += previous_line + "\n\n" |
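The three *_autoretry helpers in the sample above share one retry shape: loop while the backoff object permits, report success or failure back to it, and fall through to quit_failed_initialization once the retry budget is exhausted. A minimal standalone sketch of that pattern, assuming only a backoff object exposing keep_going/succeed/fail (as zulip.RandomExponentialBackoff does):

def run_with_backoff(operation, backoff, give_up):
    # type: (Callable[[], None], Any, Callable[[str], None]) -> None
    while backoff.keep_going():
        try:
            operation()        # may raise IOError on transient server errors
            backoff.succeed()  # record success and reset the delay
            return
        except IOError:
            backoff.fail()     # record failure and sleep with growing delay
    give_up("operation did not succeed within the retry budget")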
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | # Copyright (c) 2010-2017 openpyxl |
<|file_name|>AlunosPesoAlturaImc.py<|end_file_name|><|fim▁begin|># Write a program in which the user enters the number of students in a class; the system should read each
# student's weight and height and report the BMI at the end.
T= int(input("entre com a quantidade de alunos"))
x=0
soma=0
somapesos=0
somaltura=0
while (x<T):
P= float(input("entre com o peso do aluno"))
A= float(input("entre com a altura do aluno"))
IMC=P/(A*A)
print("IMC é ", IMC)
<|fim▁hole|> x=x+1<|fim▁end|> | |
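For reference, a cleaned-up sketch of the same exercise that also puts the sample's unused accumulator variables to work for class averages (the variable names here are illustrative):

num_students = int(input("enter the number of students: "))
total_weight = 0.0
total_height = 0.0
for _ in range(num_students):
    weight = float(input("enter the student's weight in kg: "))
    height = float(input("enter the student's height in m: "))
    print("BMI is", weight / (height * height))  # BMI = kg / m^2
    total_weight += weight
    total_height += height
if num_students > 0:
    print("average weight:", total_weight / num_students)
    print("average height:", total_height / num_students)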
<|file_name|>adiabatic_flame.py<|end_file_name|><|fim▁begin|>#
# ADIABATIC_FLAME - A freely-propagating, premixed methane/air flat
# flame with multicomponent transport properties
#
from Cantera import *
from Cantera.OneD import *
from Cantera.OneD.FreeFlame import FreeFlame
################################################################
#
# parameter values
#
p = OneAtm # pressure
tin = 300.0 # unburned gas temperature
mdot = 0.04 # kg/m^2/s
comp = 'CH4:0.45, O2:1, N2:3.76' # premixed gas composition
initial_grid = [0.0, 0.001, 0.01, 0.02, 0.029, 0.03] # m
tol_ss = [1.0e-5, 1.0e-9] # [rtol atol] for steady-state
# problem
tol_ts = [1.0e-5, 1.0e-9] # [rtol atol] for time stepping
loglevel = 1 # amount of diagnostic output (0
# to 5)
refine_grid = 1 # 1 to enable refinement, 0 to
# disable
gas = GRI30('Mix')
gas.addTransportModel('Multi')
# set its state to that of the unburned gas
gas.setState_TPX(tin, p, comp)
f = FreeFlame(gas = gas, grid = initial_grid, tfix = 600.0)
# set the upstream properties
f.inlet.set(mole_fractions = comp, temperature = tin)
f.set(tol = tol_ss, tol_time = tol_ts)
f.showSolution()
f.set(energy = 'off')
f.setRefineCriteria(ratio = 10.0, slope = 1, curve = 1)
f.setMaxJacAge(50, 50)
f.setTimeStep(1.0e-5, [2, 5, 10, 20, 50])
f.solve(loglevel, refine_grid)
f.save('ch4_adiabatic.xml','no_energy',
'solution with the energy equation disabled')
f.set(energy = 'on')
f.setRefineCriteria(ratio = 3.0, slope = 0.1, curve = 0.2)
f.solve(loglevel, refine_grid)
f.save('ch4_adiabatic.xml','energy',
'solution with the energy equation enabled')
print 'mixture-averaged flamespeed = ',f.u()[0]
gas.switchTransportModel('Multi')
f.flame.setTransportModel(gas)
f.solve(loglevel, refine_grid)
f.save('ch4_adiabatic.xml','energy_multi',
'solution with the energy equation enabled and multicomponent transport')
# write the velocity, temperature, density, and mole fractions to a CSV file
z = f.flame.grid()
T = f.T()
u = f.u()
V = f.V()
fcsv = open('adiabatic_flame.csv','w')
writeCSV(fcsv, ['z (m)', 'u (m/s)', 'V (1/s)', 'T (K)', 'rho (kg/m3)']
+ list(gas.speciesNames()))
for n in range(f.flame.nPoints()):
f.setGasState(n)
writeCSV(fcsv, [z[n], u[n], V[n], T[n], gas.density()]
+list(gas.moleFractions()))
fcsv.close()
print 'solution saved to adiabatic_flame.csv'
print 'multicomponent flamespeed = ',u[0]<|fim▁hole|><|fim▁end|> | f.showStats() |
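The sample above targets the legacy Python 2 Cantera.OneD interface. As a point of comparison, here is a rough sketch of the same freely-propagating flame in the modern cantera package; the method and attribute names are assumptions based on the current Python API, not part of the original example:

import cantera as ct

gas = ct.Solution('gri30.yaml')  # GRI-Mech 3.0 with transport data
gas.TPX = 300.0, ct.one_atm, 'CH4:0.45, O2:1, N2:3.76'
flame = ct.FreeFlame(gas, width=0.03)  # domain width in m
flame.set_refine_criteria(ratio=3.0, slope=0.1, curve=0.2)
flame.solve(loglevel=1, auto=True)  # mixture-averaged transport by default
print('mixture-averaged flamespeed =', flame.velocity[0])
flame.transport_model = 'Multi'  # switch to multicomponent transport
flame.solve(loglevel=1, auto=True)
print('multicomponent flamespeed =', flame.velocity[0])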
<|file_name|>format.js<|end_file_name|><|fim▁begin|>'use strict';
module.exports = function generate_format(it, $keyword) {
var out = ' ';
var $lvl = it.level;
var $dataLvl = it.dataLevel;
var $schema = it.schema[$keyword];
var $schemaPath = it.schemaPath + it.util.getProperty($keyword);
var $errSchemaPath = it.errSchemaPath + '/' + $keyword;
var $breakOnError = !it.opts.allErrors;
var $data = 'data' + ($dataLvl || '');
if (it.opts.format === false) {
if ($breakOnError) {
out += ' if (true) { ';
}
return out;
}
var $isData = it.opts.v5 && $schema && $schema.$data,
$schemaValue;
if ($isData) {
out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; ';
$schemaValue = 'schema' + $lvl;
} else {
$schemaValue = $schema;
}
var $unknownFormats = it.opts.unknownFormats,
$allowUnknown = Array.isArray($unknownFormats);
if ($isData) {
var $format = 'format' + $lvl;
out += ' var ' + ($format) + ' = formats[' + ($schemaValue) + ']; var isObject' + ($lvl) + ' = typeof ' + ($format) + ' == \'object\' && !(' + ($format) + ' instanceof RegExp) && ' + ($format) + '.validate; if (isObject' + ($lvl) + ') { ';
if (it.async) {
out += ' var async' + ($lvl) + ' = ' + ($format) + '.async; ';
}
out += ' ' + ($format) + ' = ' + ($format) + '.validate; } if ( ';
if ($isData) {
out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'string\') || ';
}
out += ' (';
if ($unknownFormats === true || $allowUnknown) {
out += ' (' + ($schemaValue) + ' && !' + ($format) + ' ';
if ($allowUnknown) {
out += ' && self._opts.unknownFormats.indexOf(' + ($schemaValue) + ') == -1 ';
}
out += ') || ';
}
out += ' (' + ($format) + ' && !(typeof ' + ($format) + ' == \'function\' ? ';
if (it.async) {
out += ' (async' + ($lvl) + ' ? ' + (it.yieldAwait) + ' ' + ($format) + '(' + ($data) + ') : ' + ($format) + '(' + ($data) + ')) ';
} else {
out += ' ' + ($format) + '(' + ($data) + ') ';
}
out += ' : ' + ($format) + '.test(' + ($data) + '))))) {';
} else {
var $format = it.formats[$schema];
if (!$format) {
if ($unknownFormats === true || ($allowUnknown && $unknownFormats.indexOf($schema) == -1)) {
throw new Error('unknown format "' + $schema + '" is used in schema at path "' + it.errSchemaPath + '"');
} else {
if (!$allowUnknown) {
console.warn('unknown format "' + $schema + '" ignored in schema at path "' + it.errSchemaPath + '"');
if ($unknownFormats !== 'ignore') console.warn('In the next major version it will throw exception. See option unknownFormats for more information');
}
if ($breakOnError) {
out += ' if (true) { ';
}
return out;
}
}
var $isObject = typeof $format == 'object' && !($format instanceof RegExp) && $format.validate;
if ($isObject) {
var $async = $format.async === true;
$format = $format.validate;
}
if ($async) {
if (!it.async) throw new Error('async format in sync schema');
var $formatRef = 'formats' + it.util.getProperty($schema) + '.validate';
out += ' if (!(' + (it.yieldAwait) + ' ' + ($formatRef) + '(' + ($data) + '))) { ';
} else {
out += ' if (! ';
var $formatRef = 'formats' + it.util.getProperty($schema);
if ($isObject) $formatRef += '.validate';
if (typeof $format == 'function') {
out += ' ' + ($formatRef) + '(' + ($data) + ') ';
} else {
out += ' ' + ($formatRef) + '.test(' + ($data) + ') ';
}
<|fim▁hole|> $$outStack.push(out);
out = ''; /* istanbul ignore else */
if (it.createErrors !== false) {
out += ' { keyword: \'' + ('format') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { format: ';
if ($isData) {
out += '' + ($schemaValue);
} else {
out += '' + (it.util.toQuotedString($schema));
}
out += ' } ';
if (it.opts.messages !== false) {
out += ' , message: \'should match format "';
if ($isData) {
out += '\' + ' + ($schemaValue) + ' + \'';
} else {
out += '' + (it.util.escapeQuotes($schema));
}
out += '"\' ';
}
if (it.opts.verbose) {
out += ' , schema: ';
if ($isData) {
out += 'validate.schema' + ($schemaPath);
} else {
out += '' + (it.util.toQuotedString($schema));
}
out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' ';
}
out += ' } ';
} else {
out += ' {} ';
}
var __err = out;
out = $$outStack.pop();
if (!it.compositeRule && $breakOnError) { /* istanbul ignore if */
if (it.async) {
out += ' throw new ValidationError([' + (__err) + ']); ';
} else {
out += ' validate.errors = [' + (__err) + ']; return false; ';
}
} else {
out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; ';
}
out += ' } ';
if ($breakOnError) {
out += ' else { ';
}
return out;
}<|fim▁end|> | out += ') { ';
}
}
var $$outStack = $$outStack || [];
|
<|file_name|>demo.py<|end_file_name|><|fim▁begin|>a = "nabb jasj jjs, jjsajdhh kjkda jj"
a1 = a.split(",")<|fim▁hole|>for i in range(0,len(a1)):
print (len(a1[i].split()))<|fim▁end|> | |
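The index-based loop in this sample is equivalent to iterating over the comma-separated parts directly; an idiomatic Python 3 rendering:

text = "nabb jasj jjs, jjsajdhh kjkda jj"
for part in text.split(","):
    print(len(part.split()))  # count of whitespace-separated words per part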
<|file_name|>ProfileAction.py<|end_file_name|><|fim▁begin|>from .Base_Action import *
<|fim▁hole|>class ProfileAction(Base_Action):
def __init__(self, action_xml, root_action=None):
        super(ProfileAction, self).__init__(action_xml, root_action)
        self.shouldUseLaunchSchemeArgsEnv = self.contents.get('shouldUseLaunchSchemeArgsEnv')
        self.savedToolIdentifier = self.contents.get('savedToolIdentifier')
        self.useCustomWorkingDirectory = self.contents.get('useCustomWorkingDirectory')
        self.buildConfiguration = self.contents.get('buildConfiguration')
        self.debugDocumentVersioning = self.contents.get('debugDocumentVersioning')<|fim▁end|> |
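A note on the super() call in the constructor above: naming the class explicitly matters, because the super(self.__class__, self) spelling resolves the class at runtime and recurses forever as soon as the class is subclassed. A minimal sketch of that failure mode, with hypothetical classes:

class Base(object):
    def __init__(self):
        pass

class Child(Base):
    def __init__(self):
        # BAD: for GrandChild(), self.__class__ is GrandChild, so super()
        # resolves to Child again and this __init__ calls itself forever.
        super(self.__class__, self).__init__()

class GrandChild(Child):
    pass

# GrandChild()  # RuntimeError: maximum recursion depth exceeded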
<|file_name|>arc_auth_service.cc<|end_file_name|><|fim▁begin|>// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/ash/arc/auth/arc_auth_service.h"
#include <utility>
#include <vector>
#include "ash/components/arc/arc_browser_context_keyed_service_factory_base.h"
#include "ash/components/arc/arc_features.h"
#include "ash/components/arc/arc_prefs.h"
#include "ash/components/arc/arc_util.h"
#include "ash/components/arc/enterprise/arc_data_snapshotd_manager.h"
#include "ash/components/arc/session/arc_bridge_service.h"
#include "ash/components/arc/session/arc_management_transition.h"
#include "ash/components/arc/session/arc_service_manager.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/memory/singleton.h"
#include "base/strings/utf_string_conversions.h"
#include "base/time/time.h"
#include "chrome/browser/ash/account_manager/account_manager_util.h"
#include "chrome/browser/ash/arc/arc_optin_uma.h"
#include "chrome/browser/ash/arc/arc_util.h"
#include "chrome/browser/ash/arc/auth/arc_background_auth_code_fetcher.h"
#include "chrome/browser/ash/arc/auth/arc_robot_auth_code_fetcher.h"
#include "chrome/browser/ash/arc/policy/arc_policy_util.h"
#include "chrome/browser/ash/arc/session/arc_provisioning_result.h"
#include "chrome/browser/ash/arc/session/arc_session_manager.h"
#include "chrome/browser/ash/login/demo_mode/demo_session.h"
#include "chrome/browser/ash/profiles/profile_helper.h"
#include "chrome/browser/lifetime/application_lifetime.h"
#include "chrome/browser/profiles/profile.h"
#include "chrome/browser/signin/identity_manager_factory.h"
#include "chrome/browser/signin/signin_ui_util.h"
#include "chrome/browser/ui/app_list/arc/arc_data_removal_dialog.h"
#include "chrome/browser/ui/settings_window_manager_chromeos.h"
#include "chrome/browser/ui/webui/settings/chromeos/constants/routes.mojom.h"
#include "chrome/browser/ui/webui/signin/inline_login_dialog_chromeos.h"
#include "chrome/common/webui_url_constants.h"
#include "components/account_manager_core/account_manager_facade.h"
#include "components/account_manager_core/chromeos/account_manager_facade_factory.h"
#include "components/prefs/pref_service.h"
#include "components/signin/public/base/consent_level.h"
#include "components/user_manager/user_manager.h"
#include "content/public/browser/browser_context.h"
#include "content/public/browser/browser_thread.h"
#include "content/public/browser/storage_partition.h"
#include "services/network/public/cpp/shared_url_loader_factory.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
namespace arc {
namespace {
// Singleton factory for ArcAuthService.
class ArcAuthServiceFactory
: public internal::ArcBrowserContextKeyedServiceFactoryBase<
ArcAuthService,
ArcAuthServiceFactory> {
public:
// Factory name used by ArcBrowserContextKeyedServiceFactoryBase.
static constexpr const char* kName = "ArcAuthServiceFactory";
static ArcAuthServiceFactory* GetInstance() {
return base::Singleton<ArcAuthServiceFactory>::get();
}
private:
friend struct base::DefaultSingletonTraits<ArcAuthServiceFactory>;
ArcAuthServiceFactory() { DependsOn(IdentityManagerFactory::GetInstance()); }
~ArcAuthServiceFactory() override = default;
};
mojom::ChromeAccountType GetAccountType(const Profile* profile) {
if (profile->IsChild())
return mojom::ChromeAccountType::CHILD_ACCOUNT;
if (IsActiveDirectoryUserForProfile(profile))
return mojom::ChromeAccountType::ACTIVE_DIRECTORY_ACCOUNT;
auto* demo_session = ash::DemoSession::Get();
if (demo_session && demo_session->started()) {
// Internally, demo mode is implemented as a public session, and should
// generally follow normal robot account provisioning flow. Offline enrolled
// demo mode is an exception, as it is expected to work purely offline, with
// a (fake) robot account not known to auth service - this means that it has
// to go through different, offline provisioning flow.
DCHECK(IsRobotOrOfflineDemoAccountMode());
return demo_session->offline_enrolled()
? mojom::ChromeAccountType::OFFLINE_DEMO_ACCOUNT
: mojom::ChromeAccountType::ROBOT_ACCOUNT;
}
return IsRobotOrOfflineDemoAccountMode()
? mojom::ChromeAccountType::ROBOT_ACCOUNT
: mojom::ChromeAccountType::USER_ACCOUNT;
}
mojom::AccountInfoPtr CreateAccountInfo(bool is_enforced,
const std::string& auth_info,
const std::string& account_name,
mojom::ChromeAccountType account_type,
bool is_managed) {
mojom::AccountInfoPtr account_info = mojom::AccountInfo::New();
account_info->account_name = account_name;
if (account_type == mojom::ChromeAccountType::ACTIVE_DIRECTORY_ACCOUNT) {
account_info->enrollment_token = auth_info;
} else {
if (!is_enforced)
account_info->auth_code = absl::nullopt;
else
account_info->auth_code = auth_info;
}
account_info->account_type = account_type;
account_info->is_managed = is_managed;
return account_info;
}
bool IsPrimaryGaiaAccount(const std::string& gaia_id) {
// |GetPrimaryUser| is fine because ARC is only available on the first
// (Primary) account that participates in multi-signin.
const user_manager::User* user =
user_manager::UserManager::Get()->GetPrimaryUser();
DCHECK(user);
return user->GetAccountId().GetAccountType() == AccountType::GOOGLE &&
user->GetAccountId().GetGaiaId() == gaia_id;
}
bool IsPrimaryOrDeviceLocalAccount(
const signin::IdentityManager* identity_manager,
const std::string& account_name) {
// |GetPrimaryUser| is fine because ARC is only available on the first
// (Primary) account that participates in multi-signin.
const user_manager::User* user =
user_manager::UserManager::Get()->GetPrimaryUser();
DCHECK(user);
// There is no Gaia user for device local accounts, but in this case there is
// always only a primary account.
if (user->IsDeviceLocalAccount())
return true;
const AccountInfo account_info =
identity_manager->FindExtendedAccountInfoByEmailAddress(account_name);
if (account_info.IsEmpty())
return false;
DCHECK(!account_info.gaia.empty());
return IsPrimaryGaiaAccount(account_info.gaia);
}
// See //ash/components/arc/mojom/auth.mojom RequestPrimaryAccount() for the
// spec. See also go/arc-primary-account.
std::string GetAccountName(Profile* profile) {
switch (GetAccountType(profile)) {
case mojom::ChromeAccountType::USER_ACCOUNT:
case mojom::ChromeAccountType::CHILD_ACCOUNT:
// IdentityManager::GetPrimaryAccountInfo(
// signin::ConsentLevel::kSignin).email might be more appropriate
// here, but this is what we have done historically.
return chromeos::ProfileHelper::Get()
->GetUserByProfile(profile)
->GetDisplayEmail();
case mojom::ChromeAccountType::ROBOT_ACCOUNT:
case mojom::ChromeAccountType::ACTIVE_DIRECTORY_ACCOUNT:
case mojom::ChromeAccountType::OFFLINE_DEMO_ACCOUNT:
return std::string();
case mojom::ChromeAccountType::UNKNOWN:
NOTREACHED();
return std::string();
}
}
void OnFetchPrimaryAccountInfoCompleted(
ArcAuthService::RequestAccountInfoCallback callback,
bool persistent_error,
mojom::ArcAuthCodeStatus status,
mojom::AccountInfoPtr account_info) {
std::move(callback).Run(std::move(status), std::move(account_info),
persistent_error);
}
} // namespace
// static
const char ArcAuthService::kArcServiceName[] = "arc::ArcAuthService";
// static
ArcAuthService* ArcAuthService::GetForBrowserContext(
content::BrowserContext* context) {
return ArcAuthServiceFactory::GetForBrowserContext(context);
}
ArcAuthService::ArcAuthService(content::BrowserContext* browser_context,
ArcBridgeService* arc_bridge_service)
: profile_(Profile::FromBrowserContext(browser_context)),
identity_manager_(IdentityManagerFactory::GetForProfile(profile_)),
arc_bridge_service_(arc_bridge_service),
url_loader_factory_(profile_->GetDefaultStoragePartition()
->GetURLLoaderFactoryForBrowserProcess()) {
arc_bridge_service_->auth()->SetHost(this);
arc_bridge_service_->auth()->AddObserver(this);
ArcSessionManager::Get()->AddObserver(this);
identity_manager_->AddObserver(this);
}
ArcAuthService::~ArcAuthService() {
ArcSessionManager::Get()->RemoveObserver(this);
arc_bridge_service_->auth()->RemoveObserver(this);
arc_bridge_service_->auth()->SetHost(nullptr);
}
void ArcAuthService::GetGoogleAccountsInArc(
GetGoogleAccountsInArcCallback callback) {
DCHECK_CURRENTLY_ON(content::BrowserThread::UI);
DCHECK(pending_get_arc_accounts_callback_.is_null())
<< "Cannot have more than one pending GetGoogleAccountsInArc request";
if (!arc::IsArcProvisioned(profile_)) {
std::move(callback).Run(std::vector<mojom::ArcAccountInfoPtr>());
return;
}
if (!arc_bridge_service_->auth()->IsConnected()) {
pending_get_arc_accounts_callback_ = std::move(callback);
// Will be retried in |OnConnectionReady|.
return;
}
DispatchAccountsInArc(std::move(callback));
}
void ArcAuthService::RequestPrimaryAccount(
RequestPrimaryAccountCallback callback) {
std::move(callback).Run(GetAccountName(profile_), GetAccountType(profile_));
}
void ArcAuthService::OnConnectionReady() {
// `TriggerAccountsPushToArc()` will not be triggered for the first session,
// when ARC has not been provisioned yet. For the first session, an account
// push will be triggered by `OnArcInitialStart()`, after a successful device
// provisioning.
// For the second and subsequent sessions, `arc::IsArcProvisioned()` will be
// `true`.
if (arc::IsArcProvisioned(profile_))
TriggerAccountsPushToArc(false /* filter_primary_account */);
if (pending_get_arc_accounts_callback_)
DispatchAccountsInArc(std::move(pending_get_arc_accounts_callback_));
// Report main account resolution status for provisioned devices.
if (!IsArcProvisioned(profile_))
return;
auto* instance = ARC_GET_INSTANCE_FOR_METHOD(arc_bridge_service_->auth(),
GetMainAccountResolutionStatus);
if (!instance)
return;
instance->GetMainAccountResolutionStatus(
base::BindOnce(&ArcAuthService::OnMainAccountResolutionStatus,
weak_ptr_factory_.GetWeakPtr()));
}
void ArcAuthService::OnConnectionClosed() {
DCHECK_CURRENTLY_ON(content::BrowserThread::UI);
pending_token_requests_.clear();
}
<|fim▁hole|> if (account->is_initial_signin()) {
// UMA for initial signin is updated from ArcSessionManager.
ArcSessionManager::Get()->OnProvisioningFinished(provisioning_result);
return;
}
// Re-auth shouldn't be triggered for non-Gaia device local accounts.
if (!user_manager::UserManager::Get()->IsLoggedInAsUserWithGaiaAccount()) {
NOTREACHED() << "Shouldn't re-auth for non-Gaia accounts";
return;
}
const ProvisioningStatus status = GetProvisioningStatus(provisioning_result);
if (!account->is_account_name() || !account->get_account_name() ||
account->get_account_name().value().empty() ||
IsPrimaryOrDeviceLocalAccount(identity_manager_,
account->get_account_name().value())) {
// Reauthorization for the Primary Account.
// The check for |!account_name.has_value()| is for backwards compatibility
// with older ARC versions, for which Mojo will set |account_name| to
// empty/null.
UpdateReauthorizationResultUMA(status, profile_);
} else {
UpdateSecondarySigninResultUMA(status);
}
}
void ArcAuthService::ReportMetrics(mojom::MetricsType metrics_type,
int32_t value) {
switch (metrics_type) {
case mojom::MetricsType::NETWORK_WAITING_TIME_MILLISECONDS:
UpdateAuthTiming("Arc.Auth.NetworkWait.TimeDelta",
base::Milliseconds(value), profile_);
break;
case mojom::MetricsType::CHECKIN_ATTEMPTS:
UpdateAuthCheckinAttempts(value, profile_);
break;
case mojom::MetricsType::CHECKIN_TIME_MILLISECONDS:
UpdateAuthTiming("Arc.Auth.Checkin.TimeDelta", base::Milliseconds(value),
profile_);
break;
case mojom::MetricsType::SIGNIN_TIME_MILLISECONDS:
UpdateAuthTiming("Arc.Auth.SignIn.TimeDelta", base::Milliseconds(value),
profile_);
break;
case mojom::MetricsType::ACCOUNT_CHECK_MILLISECONDS:
UpdateAuthTiming("Arc.Auth.AccountCheck.TimeDelta",
base::Milliseconds(value), profile_);
break;
}
}
void ArcAuthService::ReportAccountCheckStatus(
mojom::AccountCheckStatus status) {
UpdateAuthAccountCheckStatus(status, profile_);
}
void ArcAuthService::ReportAccountReauthReason(mojom::ReauthReason reason) {
UpdateAccountReauthReason(reason, profile_);
}
void ArcAuthService::ReportAndroidIdSource(mojom::AndroidIdSource source) {
UpdateAndroidIdSource(source, profile_);
}
void ArcAuthService::ReportManagementChangeStatus(
mojom::ManagementChangeStatus status) {
UpdateSupervisionTransitionResultUMA(status);
switch (status) {
case mojom::ManagementChangeStatus::CLOUD_DPC_DISABLED:
case mojom::ManagementChangeStatus::CLOUD_DPC_ALREADY_DISABLED:
case mojom::ManagementChangeStatus::CLOUD_DPC_ENABLED:
case mojom::ManagementChangeStatus::CLOUD_DPC_ALREADY_ENABLED:
profile_->GetPrefs()->SetInteger(
prefs::kArcManagementTransition,
static_cast<int>(ArcManagementTransition::NO_TRANSITION));
// TODO(brunokim): notify potential observers.
break;
case mojom::ManagementChangeStatus::CLOUD_DPC_DISABLING_FAILED:
case mojom::ManagementChangeStatus::CLOUD_DPC_ENABLING_FAILED:
LOG(ERROR) << "Management transition failed: " << status;
ShowDataRemovalConfirmationDialog(
profile_, base::BindOnce(&ArcAuthService::OnDataRemovalAccepted,
weak_ptr_factory_.GetWeakPtr()));
break;
case mojom::ManagementChangeStatus::INVALID_MANAGEMENT_STATE:
NOTREACHED() << "Invalid status of management transition: " << status;
}
}
void ArcAuthService::RequestPrimaryAccountInfo(
RequestPrimaryAccountInfoCallback callback) {
// This is the provisioning flow.
FetchPrimaryAccountInfo(true /* initial_signin */, std::move(callback));
}
void ArcAuthService::RequestAccountInfo(const std::string& account_name,
RequestAccountInfoCallback callback) {
// This is the post provisioning flow.
// This request could have come for re-authenticating an existing account in
// ARC, or for signing in a new Secondary Account.
// Check if |account_name| points to a Secondary Account.
if (!IsPrimaryOrDeviceLocalAccount(identity_manager_, account_name)) {
FetchSecondaryAccountInfo(account_name, std::move(callback));
return;
}
// TODO(solovey): Check secondary account ARC sign-in statistics and send
// |persistent_error| == true for primary account for cases when refresh token
// has persistent error.
FetchPrimaryAccountInfo(
false /* initial_signin */,
base::BindOnce(&OnFetchPrimaryAccountInfoCompleted, std::move(callback),
false /* persistent_error */));
}
void ArcAuthService::FetchPrimaryAccountInfo(
bool initial_signin,
RequestPrimaryAccountInfoCallback callback) {
const mojom::ChromeAccountType account_type = GetAccountType(profile_);
if (IsArcOptInVerificationDisabled()) {
std::move(callback).Run(
mojom::ArcAuthCodeStatus::SUCCESS,
CreateAccountInfo(false /* is_enforced */,
std::string() /* auth_info */,
std::string() /* auth_name */, account_type,
policy_util::IsAccountManaged(profile_)));
return;
}
if (IsActiveDirectoryUserForProfile(profile_)) {
// For Active Directory enrolled devices, we get an enrollment token for a
// managed Google Play account from DMServer.
auto enrollment_token_fetcher =
std::make_unique<ArcActiveDirectoryEnrollmentTokenFetcher>(
ArcSessionManager::Get()->support_host());
// Add the request to |pending_token_requests_| first, before starting a
// token fetch. In case the callback is called immediately, we do not want
// to add an already completed request to |pending_token_requests_|.
auto* enrollment_token_fetcher_ptr = enrollment_token_fetcher.get();
pending_token_requests_.emplace_back(std::move(enrollment_token_fetcher));
enrollment_token_fetcher_ptr->Fetch(
base::BindOnce(&ArcAuthService::OnActiveDirectoryEnrollmentTokenFetched,
weak_ptr_factory_.GetWeakPtr(),
enrollment_token_fetcher_ptr, std::move(callback)));
return;
}
if (account_type == mojom::ChromeAccountType::OFFLINE_DEMO_ACCOUNT) {
// Skip account auth code fetch for offline enrolled demo mode.
std::move(callback).Run(
mojom::ArcAuthCodeStatus::SUCCESS,
CreateAccountInfo(true /* is_enforced */, std::string() /* auth_info */,
std::string() /* auth_name */, account_type,
true /* is_managed */));
return;
}
// For non-AD enrolled devices an auth code is fetched.
std::unique_ptr<ArcAuthCodeFetcher> auth_code_fetcher;
if (account_type == mojom::ChromeAccountType::ROBOT_ACCOUNT) {
// For robot accounts, which are used in kiosk and public session mode
// (which includes online demo sessions), use Robot auth code fetching.
auth_code_fetcher = std::make_unique<ArcRobotAuthCodeFetcher>();
if (url_loader_factory_for_testing_set_) {
static_cast<ArcRobotAuthCodeFetcher*>(auth_code_fetcher.get())
->SetURLLoaderFactoryForTesting(url_loader_factory_);
}
} else {
// Optionally retrieve auth code in silent mode. Use the "unconsented"
// primary account because this class doesn't care about browser sync
// consent.
DCHECK(identity_manager_->HasPrimaryAccount(signin::ConsentLevel::kSignin));
auth_code_fetcher = CreateArcBackgroundAuthCodeFetcher(
identity_manager_->GetPrimaryAccountId(signin::ConsentLevel::kSignin),
initial_signin);
}
// Add the request to |pending_token_requests_| first, before starting a token
// fetch. In case the callback is called immediately, we do not want to add an
// already completed request to |pending_token_requests_|.
auto* auth_code_fetcher_ptr = auth_code_fetcher.get();
pending_token_requests_.emplace_back(std::move(auth_code_fetcher));
auth_code_fetcher_ptr->Fetch(
base::BindOnce(&ArcAuthService::OnPrimaryAccountAuthCodeFetched,
weak_ptr_factory_.GetWeakPtr(), auth_code_fetcher_ptr,
std::move(callback)));
}
void ArcAuthService::IsAccountManagerAvailable(
IsAccountManagerAvailableCallback callback) {
std::move(callback).Run(ash::IsAccountManagerAvailable(profile_));
}
void ArcAuthService::HandleAddAccountRequest() {
DCHECK(ash::IsAccountManagerAvailable(profile_));
::GetAccountManagerFacade(profile_->GetPath().value())
->ShowAddAccountDialog(
account_manager::AccountManagerFacade::AccountAdditionSource::kArc);
}
void ArcAuthService::HandleRemoveAccountRequest(const std::string& email) {
DCHECK(ash::IsAccountManagerAvailable(profile_));
chrome::SettingsWindowManager::GetInstance()->ShowOSSettings(
profile_, chromeos::settings::mojom::kMyAccountsSubpagePath);
}
void ArcAuthService::HandleUpdateCredentialsRequest(const std::string& email) {
DCHECK(ash::IsAccountManagerAvailable(profile_));
::GetAccountManagerFacade(profile_->GetPath().value())
->ShowReauthAccountDialog(
account_manager::AccountManagerFacade::AccountAdditionSource::kArc,
email);
}
void ArcAuthService::OnRefreshTokenUpdatedForAccount(
const CoreAccountInfo& account_info) {
// TODO(sinhak): Identity Manager is specific to a Profile. Move this to a
// proper Profile independent entity once we have that.
DCHECK_CURRENTLY_ON(content::BrowserThread::UI);
if (!ash::IsAccountManagerAvailable(profile_))
return;
// Ignore the update if ARC has not been provisioned yet.
if (!arc::IsArcProvisioned(profile_))
return;
if (identity_manager_->HasAccountWithRefreshTokenInPersistentErrorState(
account_info.account_id)) {
VLOG(1) << "Ignoring account update due to lack of a valid token: "
<< account_info.email;
return;
}
auto* instance = ARC_GET_INSTANCE_FOR_METHOD(arc_bridge_service_->auth(),
OnAccountUpdated);
if (!instance)
return;
const std::string account_name = account_info.email;
DCHECK(!account_name.empty());
instance->OnAccountUpdated(account_name, mojom::AccountUpdateType::UPSERT);
}
void ArcAuthService::OnExtendedAccountInfoRemoved(
const AccountInfo& account_info) {
DCHECK_CURRENTLY_ON(content::BrowserThread::UI);
if (!ash::IsAccountManagerAvailable(profile_))
return;
DCHECK(!IsPrimaryGaiaAccount(account_info.gaia));
// Ignore the update if ARC has not been provisioned yet.
if (!arc::IsArcProvisioned(profile_))
return;
auto* instance = ARC_GET_INSTANCE_FOR_METHOD(arc_bridge_service_->auth(),
OnAccountUpdated);
if (!instance)
return;
DCHECK(!account_info.email.empty());
instance->OnAccountUpdated(account_info.email,
mojom::AccountUpdateType::REMOVAL);
}
void ArcAuthService::OnArcInitialStart() {
TriggerAccountsPushToArc(true /* filter_primary_account */);
}
void ArcAuthService::Shutdown() {
identity_manager_->RemoveObserver(this);
}
void ArcAuthService::OnActiveDirectoryEnrollmentTokenFetched(
ArcActiveDirectoryEnrollmentTokenFetcher* fetcher,
RequestPrimaryAccountInfoCallback callback,
ArcActiveDirectoryEnrollmentTokenFetcher::Status status,
const std::string& enrollment_token,
const std::string& user_id) {
DCHECK_CURRENTLY_ON(content::BrowserThread::UI);
// |fetcher| will be invalid after this.
DeletePendingTokenRequest(fetcher);
switch (status) {
case ArcActiveDirectoryEnrollmentTokenFetcher::Status::SUCCESS: {
// Save user_id to the user profile.
profile_->GetPrefs()->SetString(prefs::kArcActiveDirectoryPlayUserId,
user_id);
// Send enrollment token to ARC.
std::move(callback).Run(
mojom::ArcAuthCodeStatus::SUCCESS,
CreateAccountInfo(true /* is_enforced */, enrollment_token,
std::string() /* account_name */,
mojom::ChromeAccountType::ACTIVE_DIRECTORY_ACCOUNT,
true /* is_managed */));
break;
}
case ArcActiveDirectoryEnrollmentTokenFetcher::Status::FAILURE: {
// Send error to ARC.
std::move(callback).Run(
mojom::ArcAuthCodeStatus::CHROME_SERVER_COMMUNICATION_ERROR, nullptr);
break;
}
case ArcActiveDirectoryEnrollmentTokenFetcher::Status::ARC_DISABLED: {
// Send error to ARC.
std::move(callback).Run(mojom::ArcAuthCodeStatus::ARC_DISABLED, nullptr);
break;
}
}
}
void ArcAuthService::OnPrimaryAccountAuthCodeFetched(
ArcAuthCodeFetcher* fetcher,
RequestPrimaryAccountInfoCallback callback,
bool success,
const std::string& auth_code) {
DCHECK_CURRENTLY_ON(content::BrowserThread::UI);
// |fetcher| will be invalid after this.
DeletePendingTokenRequest(fetcher);
if (success) {
const std::string& full_account_id = GetAccountName(profile_);
std::move(callback).Run(
mojom::ArcAuthCodeStatus::SUCCESS,
CreateAccountInfo(!IsArcOptInVerificationDisabled(), auth_code,
full_account_id, GetAccountType(profile_),
policy_util::IsAccountManaged(profile_)));
} else if (ash::DemoSession::Get() && ash::DemoSession::Get()->started()) {
// For demo sessions, if auth code fetch failed (e.g. because the device is
// offline), fall back to accountless offline demo mode provisioning.
std::move(callback).Run(
mojom::ArcAuthCodeStatus::SUCCESS,
CreateAccountInfo(true /* is_enforced */, std::string() /* auth_info */,
std::string() /* auth_name */,
mojom::ChromeAccountType::OFFLINE_DEMO_ACCOUNT,
true /* is_managed */));
} else if (arc::data_snapshotd::ArcDataSnapshotdManager::Get()->state() ==
arc::data_snapshotd::ArcDataSnapshotdManager::State::kRunning) {
// If MGS is running with a snapshotted data/, it could be offline.
const std::string& full_account_id = GetAccountName(profile_);
std::move(callback).Run(
mojom::ArcAuthCodeStatus::SUCCESS,
CreateAccountInfo(false /* is_enforced */,
std::string() /* auth_info */, full_account_id,
GetAccountType(profile_),
policy_util::IsAccountManaged(profile_)));
} else {
// Send error to ARC.
std::move(callback).Run(
mojom::ArcAuthCodeStatus::CHROME_SERVER_COMMUNICATION_ERROR, nullptr);
}
}
void ArcAuthService::FetchSecondaryAccountInfo(
const std::string& account_name,
RequestAccountInfoCallback callback) {
DCHECK_CURRENTLY_ON(content::BrowserThread::UI);
AccountInfo account_info =
identity_manager_->FindExtendedAccountInfoByEmailAddress(account_name);
if (account_info.IsEmpty()) {
// Account is in ARC, but not in Chrome OS Account Manager.
std::move(callback).Run(mojom::ArcAuthCodeStatus::CHROME_ACCOUNT_NOT_FOUND,
nullptr /* account_info */,
true /* persistent_error */);
return;
}
const CoreAccountId& account_id = account_info.account_id;
DCHECK(!account_id.empty());
if (identity_manager_->HasAccountWithRefreshTokenInPersistentErrorState(
account_id)) {
std::move(callback).Run(
mojom::ArcAuthCodeStatus::CHROME_SERVER_COMMUNICATION_ERROR,
nullptr /* account_info */, true /* persistent_error */);
return;
}
std::unique_ptr<ArcBackgroundAuthCodeFetcher> fetcher =
CreateArcBackgroundAuthCodeFetcher(account_id,
false /* initial_signin */);
// Add the request to |pending_token_requests_| first, before starting a
// token fetch. In case the callback is called immediately, we do not want
// to add an already completed request to |pending_token_requests_|.
auto* fetcher_ptr = fetcher.get();
pending_token_requests_.emplace_back(std::move(fetcher));
fetcher_ptr->Fetch(
base::BindOnce(&ArcAuthService::OnSecondaryAccountAuthCodeFetched,
weak_ptr_factory_.GetWeakPtr(), account_name, fetcher_ptr,
std::move(callback)));
}
void ArcAuthService::OnSecondaryAccountAuthCodeFetched(
const std::string& account_name,
ArcBackgroundAuthCodeFetcher* fetcher,
RequestAccountInfoCallback callback,
bool success,
const std::string& auth_code) {
// |fetcher| will be invalid after this.
DeletePendingTokenRequest(fetcher);
if (success) {
std::move(callback).Run(
mojom::ArcAuthCodeStatus::SUCCESS,
CreateAccountInfo(true /* is_enforced */, auth_code, account_name,
mojom::ChromeAccountType::USER_ACCOUNT,
false /* is_managed */),
false /* persistent_error*/);
return;
}
AccountInfo account_info =
identity_manager_->FindExtendedAccountInfoByEmailAddress(account_name);
// Take care of the case when the user removes an account immediately after
// adding/re-authenticating it.
if (!account_info.IsEmpty()) {
const bool is_persistent_error =
identity_manager_->HasAccountWithRefreshTokenInPersistentErrorState(
account_info.account_id);
std::move(callback).Run(
mojom::ArcAuthCodeStatus::CHROME_SERVER_COMMUNICATION_ERROR,
nullptr /* account_info */, is_persistent_error);
return;
}
std::move(callback).Run(mojom::ArcAuthCodeStatus::CHROME_ACCOUNT_NOT_FOUND,
nullptr /* account_info */, true);
}
void ArcAuthService::DeletePendingTokenRequest(ArcFetcherBase* fetcher) {
DCHECK_CURRENTLY_ON(content::BrowserThread::UI);
for (auto it = pending_token_requests_.begin();
it != pending_token_requests_.end(); ++it) {
if (it->get() == fetcher) {
pending_token_requests_.erase(it);
return;
}
}
// We should not have received a call to delete a |fetcher| that was not in
// |pending_token_requests_|.
NOTREACHED();
}
void ArcAuthService::SetURLLoaderFactoryForTesting(
scoped_refptr<network::SharedURLLoaderFactory> url_loader_factory) {
url_loader_factory_ = std::move(url_loader_factory);
url_loader_factory_for_testing_set_ = true;
}
void ArcAuthService::OnDataRemovalAccepted(bool accepted) {
if (!accepted)
return;
if (!IsArcPlayStoreEnabledForProfile(profile_))
return;
VLOG(1)
<< "Request for data removal on child transition failure is confirmed";
ArcSessionManager::Get()->RequestArcDataRemoval();
ArcSessionManager::Get()->StopAndEnableArc();
}
std::unique_ptr<ArcBackgroundAuthCodeFetcher>
ArcAuthService::CreateArcBackgroundAuthCodeFetcher(
const CoreAccountId& account_id,
bool initial_signin) {
const AccountInfo account_info =
identity_manager_->FindExtendedAccountInfoByAccountId(account_id);
DCHECK(!account_info.IsEmpty());
auto fetcher = std::make_unique<ArcBackgroundAuthCodeFetcher>(
url_loader_factory_, profile_, account_id, initial_signin,
IsPrimaryGaiaAccount(account_info.gaia));
return fetcher;
}
void ArcAuthService::TriggerAccountsPushToArc(bool filter_primary_account) {
if (!ash::IsAccountManagerAvailable(profile_))
return;
const std::vector<CoreAccountInfo> accounts =
identity_manager_->GetAccountsWithRefreshTokens();
for (const CoreAccountInfo& account : accounts) {
if (filter_primary_account && IsPrimaryGaiaAccount(account.gaia))
continue;
OnRefreshTokenUpdatedForAccount(account);
}
}
void ArcAuthService::DispatchAccountsInArc(
GetGoogleAccountsInArcCallback callback) {
auto* instance = ARC_GET_INSTANCE_FOR_METHOD(arc_bridge_service_->auth(),
GetGoogleAccounts);
if (!instance) {
// Complete the callback so that it is not kept waiting forever.
std::move(callback).Run(std::vector<mojom::ArcAccountInfoPtr>());
return;
}
instance->GetGoogleAccounts(std::move(callback));
}
void ArcAuthService::OnMainAccountResolutionStatus(
mojom::MainAccountResolutionStatus status) {
UpdateMainAccountResolutionStatus(profile_, status);
}
} // namespace arc<|fim▁end|> | void ArcAuthService::OnAuthorizationResult(mojom::ArcSignInResultPtr result,
mojom::ArcSignInAccountPtr account) {
ArcProvisioningResult provisioning_result(std::move(result));
|
<|file_name|>Gruntfile.js<|end_file_name|><|fim▁begin|>/*jshint camelcase: false*/
module.exports = (grunt) => {
'use strict';
// load all grunt tasks
require('time-grunt')(grunt);
require('load-grunt-tasks')(grunt);
// configurable paths
const config = {
app: 'app',
dist: 'dist',
distMac32: 'dist/MacOS32',
distMac64: 'dist/MacOS64',
distLinux32: 'dist/Linux32',
distLinux64: 'dist/Linux64',
distWin32: 'dist/Win32',
distWin64: 'dist/Win64',
distWin: 'dist/Win',
tmp: 'buildTmp',
resources: 'resources',
appName: 'PlaylistPalace'
};
grunt.initConfig({
config: config,
clean: {
dist: {
files: [{
dot: true,
src: [
`${ config.dist }/*`,
`${ config.tmp }/*`
]
}]
},
distMac32: {
files: [{
dot: true,
src: [
`${ config.distMac32 }/*`,
`${ config.tmp }/*`
]
}]
},
distMac64: {
files: [{
dot: true,
src: [
`${ config.distMac64 }/*`,
`${ config.tmp }/*`
]
}]
},
distLinux64: {
files: [{
dot: true,
src: [
`${ config.distLinux64 }/*`,
`${ config.tmp }/*`
]
}]
},
distLinux32: {
files: [{
dot: true,
src: [
`${ config.distLinux32 }/*`,
`${ config.tmp }/*`
]
}]
},
distWin: {
files: [{
dot: true,
src: [
`${ config.distWin }/*`,
`${ config.tmp }/*`
]
}]
},
distWin32: {
files: [{
dot: true,
src: [
`${ config.distWin32 }/*`,
`${ config.tmp }/*`
]
}]
},
distWin64: {
files: [{
dot: true,
src: [
`${ config.distWin64 }/*`,
`${ config.tmp }/*`
]
}]
}
},
jshint: {
options: {
jshintrc: '.jshintrc'
},
files: `${ config.app }/js/*.js`
},
copy: {
appLinux: {
files: [{
expand: true,
cwd: `${ config.app }`,
dest: `${ config.distLinux64 }/app.nw`,
src: '**'
}]
},
appLinux32: {
files: [{
expand: true,
cwd: `${ config.app }`,
dest: `${ config.distLinux32 }/app.nw`,
src: '**'
}]
},<|fim▁hole|> files: [{
expand: true,
cwd: `${ config.app }`,
dest: `${ config.distMac32 }/node-webkit.app/Contents/Resources/app.nw`,
src: '**'
}, {
expand: true,
cwd: `${ config.resources }/mac/`,
dest: `${ config.distMac32 }/node-webkit.app/Contents/`,
filter: 'isFile',
src: '*.plist'
}, {
expand: true,
cwd: `${ config.resources }/mac/`,
dest: `${ config.distMac32 }/node-webkit.app/Contents/Resources/`,
filter: 'isFile',
src: '*.icns'
}, {
expand: true,
cwd: `${ config.app }/../node_modules/`,
dest: `${ config.distMac32 }/node-webkit.app/Contents/Resources/app.nw/node_modules/`,
src: '**'
}]
},
appMacos64: {
files: [{
expand: true,
cwd: `${ config.app }`,
dest: `${ config.distMac64 }/node-webkit.app/Contents/Resources/app.nw`,
src: '**'
}, {
expand: true,
cwd: `${ config.resources }/mac/`,
dest: `${ config.distMac64 }/node-webkit.app/Contents/`,
filter: 'isFile',
src: '*.plist'
}, {
expand: true,
cwd: `${ config.resources }/mac/`,
dest: `${ config.distMac64 }/node-webkit.app/Contents/Resources/`,
filter: 'isFile',
src: '*.icns'
}, {
expand: true,
cwd: `${ config.app }/../node_modules/`,
dest: `${ config.distMac64 }/node-webkit.app/Contents/Resources/app.nw/node_modules/`,
src: '**'
}]
},
webkit32: {
files: [{
expand: true,
cwd: `${ config.resources }/node-webkit/MacOS32`,
dest: `${ config.distMac32 }/`,
src: '**'
}]
},
webkit64: {
files: [{
expand: true,
cwd: `${ config.resources }/node-webkit/MacOS64`,
dest: `${ config.distMac64 }/`,
src: '**'
}]
},
copyWinToTmp: {
files: [{
expand: true,
cwd: `${ config.resources }/node-webkit/Windows/`,
dest: `${ config.tmp }/`,
src: '**'
}]
},
copyWin32ToTmp: {
files: [{
expand: true,
cwd: `${ config.resources }/node-webkit/Win32/`,
dest: `${ config.tmp }/`,
src: '**'
}]
},
copyWin64ToTmp: {
files: [{
expand: true,
cwd: `${ config.resources }/node-webkit/Win64/`,
dest: `${ config.tmp }/`,
src: '**'
}]
}
},
compress: {
appToTmp: {
options: {
archive: `${ config.tmp }/app.zip`
},
files: [{
expand: true,
cwd: `${ config.app }`,
src: ['**']
}]
},
finalWindowsApp: {
options: {
archive: `${ config.distWin }/${ config.appName }.zip`
},
files: [{
expand: true,
cwd: `${ config.tmp }`,
src: ['**']
}]
},
finalWindows32App: {
options: {
archive: `${ config.distWin32 }/${ config.appName }.zip`
},
files: [{
expand: true,
cwd: `${ config.tmp }`,
src: ['**']
}]
},
finalWindows64App: {
options: {
archive: `${ config.distWin64 }/${ config.appName }.zip`
},
files: [{
expand: true,
cwd: `${ config.tmp }`,
src: ['**']
}]
}
},
rename: {
macApp32: {
files: [{
src: `${ config.distMac32 }/node-webkit.app`,
dest: `${ config.distMac32 }/${ config.appName }.app`
}]
},
macApp64: {
files: [{
src: `${ config.distMac64 }/node-webkit.app`,
dest: `${ config.distMac64 }/${ config.appName }.app`
}]
},
zipToApp: {
files: [{
src: `${ config.tmp }/app.zip`,
dest: `${ config.tmp }/app.nw`
}]
}
}
});
  grunt.registerTask('mkdir', 'Making directory if needed', () => {
    // Merge this target into the existing configuration; calling
    // grunt.initConfig() here would replace everything set up above.
    grunt.config.merge({
      mkdir: {
        all: {
          options: {
            create: ['tmp_', 'test/ripper']
          },
        },
      }
    });
  });
grunt.registerTask('chmod32', 'Add lost Permissions.', () => {
const fs = require('fs'),
path = `./${config.distMac32}/${ config.appName}.app/Contents/`;
console.log(path)
fs.chmodSync(path + 'Frameworks/node-webkit Helper EH.app/Contents/MacOS/node-webkit Helper EH', '555')
fs.chmodSync(path + 'Frameworks/node-webkit Helper NP.app/Contents/MacOS/node-webkit Helper NP', '555')
fs.chmodSync(path + 'Frameworks/node-webkit Helper.app/Contents/MacOS/node-webkit Helper', '555')
fs.chmodSync(path + 'MacOS/node-webkit', '555')
});
grunt.registerTask('chmod64', 'Add lost Permissions.', () => {
const fs = require('fs'),
path = `${config.distMac64}/${ config.appName}.app/Contents/`
fs.chmodSync(path + 'Frameworks/node-webkit Helper EH.app/Contents/MacOS/node-webkit Helper EH', '555')
fs.chmodSync(path + 'Frameworks/node-webkit Helper NP.app/Contents/MacOS/node-webkit Helper NP', '555')
fs.chmodSync(path + 'Frameworks/node-webkit Helper.app/Contents/MacOS/node-webkit Helper', '555')
fs.chmodSync(path + 'MacOS/node-webkit', '555')
});
  grunt.registerTask('createLinuxApp', 'Create linux distribution.', function (version) {
    // A regular function (not an arrow) is required so grunt binds `this`
    // to the task context and this.async() can mark the task asynchronous.
    const done = this.async()
    const childProcess = require('child_process')
    const exec = childProcess.exec
    const path = './' + (version === 'Linux64' ? config.distLinux64 : config.distLinux32)
    exec(`mkdir -p ${path}; cp resources/node-webkit/${version}/nw.pak ${path} && cp resources/node-webkit/${version}/nw ${path}/node-webkit`, (error, stdout, stderr) => {
var result = true;
if (stdout) {
grunt.log.write(stdout);
}
if (stderr) {
grunt.log.write(stderr);
}
if (error !== null) {
grunt.log.error(error);
result = false;
}
done(result);
});
});
  grunt.registerTask('createWindowsApp', 'Create windows distribution.', function () {
    const done = this.async();
    const concat = require('concat-files');
    // Appending the zipped app (app.nw) to the nw.exe binary produces a
    // single self-running Windows executable.
    concat([
      'buildTmp/nw.exe',
      'buildTmp/app.nw'
    ], `buildTmp/${ config.appName }.exe`, function () {
var fs = require('fs');
fs.unlink('buildTmp/app.nw', function (error, stdout, stderr) {
if (stdout) {
grunt.log.write(stdout);
}
if (stderr) {
grunt.log.write(stderr);
}
if (error !== null) {
grunt.log.error(error);
done(false);
} else {
fs.unlink('buildTmp/nw.exe', (error, stdout, stderr) => {
var result = true;
if (stdout) {
grunt.log.write(stdout);
}
if (stderr) {
grunt.log.write(stderr);
}
if (error !== null) {
grunt.log.error(error);
result = false;
}
done(result);
});
}
});
});
});
grunt.registerTask('setVersion', 'Set version to all needed files', (version) => {
const config = grunt.config.get(['config'])
const appPath = config.app
const resourcesPath = config.resources
const mainPackageJSON = grunt.file.readJSON('package.json')
const appPackageJSON = grunt.file.readJSON(`${appPath}/package.json`)
const infoPlistTmp = grunt.file.read(`${resourcesPath}/mac/Info.plist.tmp`, {
encoding: 'UTF8'
});
const infoPlist = grunt.template.process(infoPlistTmp, {
data: {
version: version
}
})
mainPackageJSON.version = version
appPackageJSON.version = version
grunt.file.write('package.json', JSON.stringify(mainPackageJSON, null, 2), {
encoding: 'UTF8'
})
    grunt.file.write(`${appPath}/package.json`, JSON.stringify(appPackageJSON, null, 2), {
encoding: 'UTF8'
})
    grunt.file.write(`${resourcesPath}/mac/Info.plist`, infoPlist, {
encoding: 'UTF8'
})
})
grunt.registerTask('dist-linux', [
'clean:distLinux64',
'copy:appLinux',
'createLinuxApp:Linux64'
])
grunt.registerTask('dist-linux32', [
'clean:distLinux32',
'copy:appLinux32',
'createLinuxApp:Linux32'
])
grunt.registerTask('dist-win', [
'clean:distWin',
'copy:copyWinToTmp',
'compress:appToTmp',
'rename:zipToApp',
'createWindowsApp',
'compress:finalWindowsApp'
])
grunt.registerTask('dist-win32', [
'clean:distWin32',
'copy:copyWin32ToTmp',
'compress:appToTmp',
'rename:zipToApp',
'createWindowsApp',
'compress:finalWindows32App'
])
grunt.registerTask('dist-win64', [
'clean:distWin64',
'copy:copyWin64ToTmp',
'compress:appToTmp',
'rename:zipToApp',
'createWindowsApp',
'compress:finalWindows64App'
])
grunt.registerTask('dist-mac', [
'clean:distMac64',
'copy:webkit64',
'copy:appMacos64',
'rename:macApp64',
'chmod64'
])
grunt.registerTask('dist-mac32', [
'clean:distMac32',
'copy:webkit32',
'copy:appMacos32',
'rename:macApp32',
'chmod32'
])
grunt.registerTask('check', [
'jshint'
])
  grunt.registerTask('dmg', 'Create dmg from previously created app folder in dist.', function () {
    // this.async() makes grunt wait for the packaging script to finish
    // instead of ending the task before the shell command completes.
    const done = this.async()
    const createDmgCommand = `resources/mac/package.sh ${ config.appName }`
    require('child_process').exec(createDmgCommand, (error, stdout, stderr) => {
      if (stdout) {
        grunt.log.write(stdout)
      }
      if (stderr) {
        grunt.log.write(stderr)
      }
      if (error !== null) {
        grunt.log.error(error)
        done(false)
      } else {
        done(true)
      }
    })
  })
}<|fim▁end|> | appMacos32: { |
<|file_name|>fuse.rs<|end_file_name|><|fim▁begin|>use core::pin::Pin;
use futures_core::ready;
use futures_core::stream::{FusedStream, Stream};
use futures_core::task::{Context, Poll};
#[cfg(feature = "sink")]
use futures_sink::Sink;
use pin_project_lite::pin_project;<|fim▁hole|>
pin_project! {
/// Stream for the [`fuse`](super::StreamExt::fuse) method.
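    ///
    /// A fused stream keeps yielding `None` once the underlying stream has
    /// finished, instead of the unspecified behavior a plain `Stream` may
    /// exhibit when polled again after completion.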
#[derive(Debug)]
#[must_use = "streams do nothing unless polled"]
pub struct Fuse<St> {
#[pin]
stream: St,
done: bool,
}
}
impl<St> Fuse<St> {
pub(crate) fn new(stream: St) -> Self {
Self { stream, done: false }
}
/// Returns whether the underlying stream has finished or not.
///
/// If this method returns `true`, then all future calls to poll are
/// guaranteed to return `None`. If this returns `false`, then the
/// underlying stream is still in use.
pub fn is_done(&self) -> bool {
self.done
}
delegate_access_inner!(stream, St, ());
}
impl<S: Stream> FusedStream for Fuse<S> {
fn is_terminated(&self) -> bool {
self.done
}
}
impl<S: Stream> Stream for Fuse<S> {
type Item = S::Item;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<S::Item>> {
let this = self.project();
if *this.done {
return Poll::Ready(None);
}
let item = ready!(this.stream.poll_next(cx));
if item.is_none() {
*this.done = true;
}
Poll::Ready(item)
}
fn size_hint(&self) -> (usize, Option<usize>) {
if self.done {
(0, Some(0))
} else {
self.stream.size_hint()
}
}
}
// Forwarding impl of Sink from the underlying stream
#[cfg(feature = "sink")]
impl<S: Stream + Sink<Item>, Item> Sink<Item> for Fuse<S> {
type Error = S::Error;
delegate_sink!(stream, Item);
}<|fim▁end|> | |
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|>###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to
# make it easier to submit large numbers of jobs on supercomputers. It
# provides a python interface to physical input, such as crystal structures,
# as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs.<|fim▁hole|>#
# PyLaDa is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# PyLaDa. If not, see <http://www.gnu.org/licenses/>.
###############################
from pytest import fixture
@fixture
def shell():
from IPython.core.interactiveshell import InteractiveShell
shell = InteractiveShell.instance()
shell.magic("load_ext pylada")
return shell
def Extract(outdir=None):
from os.path import exists
from os import getcwd
from collections import namedtuple
from pickle import load
from pylada.misc import chdir
    if outdir is None:
outdir = getcwd()
Extract = namedtuple("Extract", ["success", "directory", "indiv", "functional"])
if not exists(outdir):
return Extract(False, outdir, None, functional)
with chdir(outdir):
if not exists("OUTCAR"):
return Extract(False, outdir, None, functional)
with open("OUTCAR", "rb") as file:
indiv, value = load(file)
return Extract(True, outdir, indiv, functional)
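# A minimal stand-in "functional" for these tests: it pickles the individual
# (plus an optional value) into an OUTCAR file in the output directory, which
# the Extract helper above reads back to report success.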
def call_functional(indiv, outdir=None, value=False, **kwargs):
from pylada.misc import local_path
from pickle import dump
path = local_path(outdir)
path.ensure(dir=True)
dump((indiv, value), path.join("OUTCAR").open("wb"))
return Extract(outdir)
call_functional.Extract = Extract
@fixture
def functional():
return call_functional<|fim▁end|> | # It is able to organise and launch computational jobs on PBS and SLURM. |
<|file_name|>dreamcast.py<|end_file_name|><|fim▁begin|>import mmap
import os.path
import re
from collections import OrderedDict
from .base_handler import BaseHandler
from .iso9660 import ISO9660Handler
from utils import MmappedFile, ConcatenatedFile
class GDIParseError(ValueError):
pass
class GDIHandler(BaseHandler):
def test(self):
        if not re.match(r'^.*\.gdi', self.file_name, re.IGNORECASE):
return False
try:
self.parse()
except GDIParseError:
return False
return True
def parse(self):
text = self.read(0, 8*1024)
lines = text.decode('ascii').splitlines()<|fim▁hole|> raise GDIParseError
try:
n_tracks = int(lines.pop(0))
except ValueError:
raise GDIParseError
if len(lines) != n_tracks:
print(len(lines), n_tracks)
raise GDIParseError
# TODO figure out complete format
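        # Each track-table line is expected to look like
        # "<index> <start sector> <type> <sector size> <file name> <offset>",
        # e.g. "3 45000 4 2352 track03.bin 0" (illustrative sample line).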
tracks = []
for track_i, line in enumerate(lines):
try:
                match = re.match(r'(?P<index>\d+) (?P<sector>\d+) (?P<type>\d+) (?P<sector_size>\d+)'
                                 r' (?P<file_name>\S+) (\d+)', line)
if not match:
raise GDIParseError
track = match.groupdict()
for key in ('index', 'sector', 'type', 'sector_size'):
track[key] = int(track[key])
if track['index'] != track_i + 1:
raise GDIParseError
tracks.append(track)
except ValueError:
raise GDIParseError
return tracks
def get_info(self):
tracks = self.parse()
for track in tracks:
track['path'] = os.path.join(os.path.dirname(self.file_name), track['file_name'])
if len(tracks) > 3 and tracks[2]['type'] == 4 and tracks[-1]['type'] == 4:
# Dreamcast discs often contain two data tracks (track 3 and the last track) in addition to track 1.
mixed_mode = True
else:
mixed_mode = False
track_info = OrderedDict()
for track in tracks:
if mixed_mode and track == tracks[-1]:
continue
if mixed_mode and track['index'] == 3:
last_track = tracks[-1]
offset_gap = (last_track['sector'] - track['sector']) * 2352
track_name = 'Track {}+{}'.format(track['index'], last_track['index'])
file = ConcatenatedFile(file_names=[track['path'], last_track['path']],
offsets=[0, offset_gap]) # TODO handle different sector sizes
else:
track_name = 'Track {}'.format(track['index'])
file = MmappedFile(track['path'])
with file:
if track['type'] == 4:
handler = DCDataTrackHandler(file=file, file_name=track['file_name'], sector_offset=track['sector'], track_name=track_name)
if handler.test():
handler.get_info()
track_info[track_name] = handler.info
else:
track_info[track_name] = 'Data track in unknown format'
elif track['type'] == 0:
track_info[track_name] = 'Audio track'
else:
track_info[track_name] = 'Unknown'
self.info['Tracks'] = track_info
class DCDataTrackHandler(ISO9660Handler):
def test(self):
if not super().test():
return False
if self.read(0, 16) == b'SEGA SEGAKATANA ':
return True
else:
return False
def get_info(self):
header_info = OrderedDict()
header_info['Hardware ID'] = self.unpack('string', 0x00, 16, 0)
header_info['Maker ID'] = self.unpack('string', 0x10, 16, 0)
header_info['CRC'] = self.unpack('string', 0x20, 4, 0)
header_info['Device'] = self.unpack('string', 0x25, 6, 0)
header_info['Disc'] = self.unpack('string', 0x2b, 3, 0)
header_info['Region'] = self.unpack('string', 0x30, 8, 0).strip()
header_info['Peripherals'] = self.unpack('string', 0x38, 8, 0)
header_info['Product number'] = self.unpack('string', 0x40, 10, 0)
header_info['Product version'] = self.unpack('string', 0x4a, 6, 0)
header_info['Release date'] = self.unpack('string', 0x50, 16, 0)
header_info['Boot file'] = self.unpack('string', 0x60, 16, 0)
header_info['Company name'] = self.unpack('string', 0x70, 16, 0)
header_info['Software name'] = self.unpack('string', 0x80, 16, 0)
self.info['Header'] = header_info
super().get_info()<|fim▁end|> | if len(lines) == 1: |
<|file_name|>memory.rs<|end_file_name|><|fim▁begin|>use alloc::collections::{BTreeMap, BTreeSet};
use alloc::sync::{Arc, Weak};
use core::borrow::Borrow;
use core::cmp::{self, Eq, Ordering, PartialEq, PartialOrd};
use core::fmt::{self, Debug};
use core::intrinsics;
use core::ops::{Deref, DerefMut};
use spin::Mutex;
use syscall::{
flag::MapFlags,
error::*,
};
use crate::arch::paging::PAGE_SIZE;
use crate::context::file::FileDescriptor;
use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::memory::Frame;
use crate::paging::mapper::PageFlushAll;
use crate::paging::{ActivePageTable, InactivePageTable, Page, PageFlags, PageIter, PhysicalAddress, RmmA, VirtualAddress};
/// Round down to the nearest multiple of page size
pub fn round_down_pages(number: usize) -> usize {
number - number % PAGE_SIZE
}
/// Round up to the nearest multiple of page size
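/// (for example, with 4 KiB pages, 4097 rounds up to 8192)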
pub fn round_up_pages(number: usize) -> usize {
round_down_pages(number + PAGE_SIZE - 1)
}
pub fn page_flags(flags: MapFlags) -> PageFlags<RmmA> {
PageFlags::new()
.user(true)
.execute(flags.contains(MapFlags::PROT_EXEC))
.write(flags.contains(MapFlags::PROT_WRITE))
//TODO: PROT_READ
}
#[derive(Debug, Default)]
pub struct UserGrants {
pub inner: BTreeSet<Grant>,
//TODO: technically VirtualAddress is from a scheme's context!
pub funmap: BTreeMap<Region, VirtualAddress>,
}
impl UserGrants {
/// Returns the grant, if any, which occupies the specified address
pub fn contains(&self, address: VirtualAddress) -> Option<&Grant> {
let byte = Region::byte(address);
self.inner
.range(..=byte)
.next_back()
.filter(|existing| existing.occupies(byte))
}
/// Returns an iterator over all grants that occupy some part of the
/// requested region
pub fn conflicts<'a>(&'a self, requested: Region) -> impl Iterator<Item = &'a Grant> + 'a {
let start = self.contains(requested.start_address());
let start_region = start.map(Region::from).unwrap_or(requested);
self
.inner
.range(start_region..)
.take_while(move |region| !region.intersect(requested).is_empty())
}
/// Return a free region with the specified size
pub fn find_free(&self, size: usize) -> Region {
// Get last used region
let last = self.inner.iter().next_back().map(Region::from).unwrap_or(Region::new(VirtualAddress::new(0), 0));
// At the earliest, start at grant offset
let address = cmp::max(last.end_address().data(), crate::USER_GRANT_OFFSET);
// Create new region
Region::new(VirtualAddress::new(address), size)
}
/// Return a free region, respecting the user's hinted address and flags. Address may be null.
pub fn find_free_at(&mut self, address: VirtualAddress, size: usize, flags: MapFlags) -> Result<Region> {
if address == VirtualAddress::new(0) {
// Free hands!
return Ok(self.find_free(size));
}
// The user wished to have this region...
let mut requested = Region::new(address, size);
if
requested.end_address().data() >= crate::PML4_SIZE * 256 // There are 256 PML4 entries reserved for userspace
            || address.data() % PAGE_SIZE != 0 // out-of-range or misaligned hints are both invalid
{
// ... but it was invalid
return Err(Error::new(EINVAL));
}
if let Some(grant) = self.contains(requested.start_address()) {
// ... but it already exists
if flags.contains(MapFlags::MAP_FIXED_NOREPLACE) {
println!("grant: conflicts with: {:#x} - {:#x}", grant.start_address().data(), grant.end_address().data());
return Err(Error::new(EEXIST));
} else if flags.contains(MapFlags::MAP_FIXED) {
// TODO: Overwrite existing grant
return Err(Error::new(EOPNOTSUPP));
} else {
// TODO: Find grant close to requested address?
requested = self.find_free(requested.size());
}
}
Ok(requested)
}
}
impl Deref for UserGrants {
type Target = BTreeSet<Grant>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for UserGrants {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
#[derive(Clone, Copy)]
pub struct Region {
start: VirtualAddress,
size: usize,
}
impl Region {
/// Create a new region with the given size
pub fn new(start: VirtualAddress, size: usize) -> Self {
Self { start, size }
}
/// Create a new region spanning exactly one byte
pub fn byte(address: VirtualAddress) -> Self {
Self::new(address, 1)
}
/// Create a new region spanning between the start and end address
/// (exclusive end)
pub fn between(start: VirtualAddress, end: VirtualAddress) -> Self {
Self::new(
start,
end.data().saturating_sub(start.data()),
)
}
/// Return the part of the specified region that intersects with self.
pub fn intersect(&self, other: Self) -> Self {
Self::between(
cmp::max(self.start_address(), other.start_address()),
cmp::min(self.end_address(), other.end_address()),
)
}
/// Get the start address of the region
pub fn start_address(&self) -> VirtualAddress {
self.start
}
/// Set the start address of the region
pub fn set_start_address(&mut self, start: VirtualAddress) {
self.start = start;
}
/// Get the last address in the region (inclusive end)
pub fn final_address(&self) -> VirtualAddress {
VirtualAddress::new(self.start.data() + self.size - 1)
}
/// Get the start address of the next region (exclusive end)
pub fn end_address(&self) -> VirtualAddress {
VirtualAddress::new(self.start.data() + self.size)
}
/// Return the exact size of the region
pub fn size(&self) -> usize {
self.size
}
/// Return true if the size of this region is zero. Grants with such a
/// region should never exist.
pub fn is_empty(&self) -> bool {
self.size == 0
}
/// Set the exact size of the region
pub fn set_size(&mut self, size: usize) {
self.size = size;
}
/// Round region up to nearest page size
pub fn round(self) -> Self {
Self {
size: round_up_pages(self.size),
..self
}
}
/// Return the size of the grant in multiples of the page size
pub fn full_size(&self) -> usize {
self.round().size()
}
/// Returns true if the address is within the regions's requested range
pub fn collides(&self, other: Self) -> bool {
self.start_address() <= other.start_address() && other.end_address().data() - self.start_address().data() < self.size()
}
/// Returns true if the address is within the regions's actual range (so,
/// rounded up to the page size)
pub fn occupies(&self, other: Self) -> bool {
self.round().collides(other)
}
/// Return all pages containing a chunk of the region
pub fn pages(&self) -> PageIter {
Page::range_inclusive(
Page::containing_address(self.start_address()),
Page::containing_address(self.end_address())
)
}
/// Returns the region from the start of self until the start of the specified region.
///
/// # Panics
///
/// Panics if the given region starts before self
pub fn before(self, region: Self) -> Option<Self> {
assert!(self.start_address() <= region.start_address());
Some(Self::between(
self.start_address(),
region.start_address(),
)).filter(|reg| !reg.is_empty())
}
/// Returns the region from the end of the given region until the end of self.
///
/// # Panics
///
/// Panics if self ends before the given region
pub fn after(self, region: Self) -> Option<Self> {
assert!(region.end_address() <= self.end_address());
Some(Self::between(
region.end_address(),
self.end_address(),
)).filter(|reg| !reg.is_empty())
}
/// Re-base address that lives inside this region, onto a new base region
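    ///
    /// Illustrative: an address 0x1004 inside a region based at 0x1000,
    /// rebased onto a region based at 0x8000, becomes 0x8004; only the
    /// offset within the region is preserved.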
pub fn rebase(self, new_base: Self, address: VirtualAddress) -> VirtualAddress {
let offset = address.data() - self.start_address().data();
let new_start = new_base.start_address().data() + offset;
VirtualAddress::new(new_start)
}
}
impl PartialEq for Region {
fn eq(&self, other: &Self) -> bool {
self.start.eq(&other.start)
}
}
impl Eq for Region {}
impl PartialOrd for Region {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.start.partial_cmp(&other.start)
}
}
impl Ord for Region {
fn cmp(&self, other: &Self) -> Ordering {
self.start.cmp(&other.start)
}
}
impl Debug for Region {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:#x}..{:#x} ({:#x} long)", self.start_address().data(), self.end_address().data(), self.size())
}
}
impl<'a> From<&'a Grant> for Region {
fn from(source: &'a Grant) -> Self {
source.region
}
}
#[derive(Debug)]
pub struct Grant {
region: Region,
flags: PageFlags<RmmA>,
mapped: bool,
owned: bool,
//TODO: This is probably a very heavy way to keep track of fmap'd files, perhaps move to the context?
pub desc_opt: Option<FileDescriptor>,
}
impl Grant {
pub fn is_owned(&self) -> bool {
self.owned
}
pub fn region(&self) -> &Region {
&self.region
}
/// Get a mutable reference to the region. This is unsafe, because a bad
/// region could lead to the wrong addresses being unmapped.
unsafe fn region_mut(&mut self) -> &mut Region {
&mut self.region
}
pub fn physmap(from: PhysicalAddress, to: VirtualAddress, size: usize, flags: PageFlags<RmmA>) -> Grant {
let mut active_table = unsafe { ActivePageTable::new(to.kind()) };
let flush_all = PageFlushAll::new();
let start_page = Page::containing_address(to);
let end_page = Page::containing_address(VirtualAddress::new(to.data() + size - 1));
for page in Page::range_inclusive(start_page, end_page) {
let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().data() - to.data() + from.data()));
let result = active_table.map_to(page, frame, flags);
flush_all.consume(result);
}
flush_all.flush();
Grant {
region: Region {
start: to,
size,
},
flags,
mapped: true,
owned: false,
desc_opt: None,
}
}
pub fn map(to: VirtualAddress, size: usize, flags: PageFlags<RmmA>) -> Grant {
let mut active_table = unsafe { ActivePageTable::new(to.kind()) };
let flush_all = PageFlushAll::new();
let start_page = Page::containing_address(to);
let end_page = Page::containing_address(VirtualAddress::new(to.data() + size - 1));
for page in Page::range_inclusive(start_page, end_page) {
let result = active_table
.map(page, flags)
.expect("TODO: handle ENOMEM in Grant::map");
flush_all.consume(result);
}
flush_all.flush();
Grant {
region: Region {
start: to,
size,
},
flags,
mapped: true,
owned: true,
desc_opt: None,
}
}
pub fn map_inactive(src: VirtualAddress, dst: VirtualAddress, size: usize, flags: PageFlags<RmmA>, desc_opt: Option<FileDescriptor>, inactive_table: &mut InactivePageTable) -> Grant {
let active_table = unsafe { ActivePageTable::new(src.kind()) };
let mut inactive_mapper = inactive_table.mapper();
let src_start_page = Page::containing_address(src);
let src_end_page = Page::containing_address(VirtualAddress::new(src.data() + size - 1));
let src_range = Page::range_inclusive(src_start_page, src_end_page);
let dst_start_page = Page::containing_address(dst);
let dst_end_page = Page::containing_address(VirtualAddress::new(dst.data() + size - 1));
let dst_range = Page::range_inclusive(dst_start_page, dst_end_page);
for (src_page, dst_page) in src_range.zip(dst_range) {
let frame = active_table.translate_page(src_page).expect("grant references unmapped memory");
let inactive_flush = inactive_mapper.map_to(dst_page, frame, flags);
// Ignore result due to mapping on inactive table
unsafe { inactive_flush.ignore(); }
}
ipi(IpiKind::Tlb, IpiTarget::Other);
Grant {
region: Region {
start: dst,
size,
},
flags,
mapped: true,
owned: false,
desc_opt,
}
}
/// This function should only be used in clone!
pub fn secret_clone(&self, new_start: VirtualAddress) -> Grant {
assert!(self.mapped);
let mut active_table = unsafe { ActivePageTable::new(new_start.kind()) };
let flush_all = PageFlushAll::new();
let start_page = Page::containing_address(self.region.start);
let end_page = Page::containing_address(VirtualAddress::new(self.region.start.data() + self.region.size - 1));
for page in Page::range_inclusive(start_page, end_page) {
//TODO: One function to do both?
let flags = active_table.translate_page_flags(page).expect("grant references unmapped memory");
let frame = active_table.translate_page(page).expect("grant references unmapped memory");
let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));
if self.owned {
let result = active_table.map(new_page, PageFlags::new().write(true))
.expect("TODO: handle ENOMEM in Grant::secret_clone");
flush_all.consume(result);
} else {
let result = active_table.map_to(new_page, frame, flags);
flush_all.consume(result);
}
}
flush_all.flush();
if self.owned {
unsafe {
intrinsics::copy(self.region.start.data() as *const u8, new_start.data() as *mut u8, self.region.size);
}
let flush_all = PageFlushAll::new();
for page in Page::range_inclusive(start_page, end_page) {
//TODO: One function to do both?
let flags = active_table.translate_page_flags(page).expect("grant references unmapped memory");
let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));
let result = active_table.remap(new_page, flags);
flush_all.consume(result);
}
flush_all.flush();
}
Grant {
region: Region {
start: new_start,
size: self.region.size,
},
flags: self.flags,
mapped: true,
owned: self.owned,
desc_opt: self.desc_opt.clone()
}
}
pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable) {
assert!(self.mapped);
let mut active_table = unsafe { ActivePageTable::new(new_start.kind()) };
let flush_all = PageFlushAll::new();
let start_page = Page::containing_address(self.region.start);
let end_page = Page::containing_address(VirtualAddress::new(self.region.start.data() + self.region.size - 1));
for page in Page::range_inclusive(start_page, end_page) {
//TODO: One function to do both?
let flags = active_table.translate_page_flags(page).expect("grant references unmapped memory");
let (result, frame) = active_table.unmap_return(page, false);
flush_all.consume(result);
let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));
let result = new_table.mapper().map_to(new_page, frame, flags);
// Ignore result due to mapping on inactive table
unsafe { result.ignore(); }
}
flush_all.flush();
self.region.start = new_start;
}
pub fn flags(&self) -> PageFlags<RmmA> {
self.flags
}
pub fn unmap(mut self) {
assert!(self.mapped);
let mut active_table = unsafe { ActivePageTable::new(self.start_address().kind()) };
let flush_all = PageFlushAll::new();
let start_page = Page::containing_address(self.start_address());
let end_page = Page::containing_address(self.final_address());
for page in Page::range_inclusive(start_page, end_page) {
let (result, frame) = active_table.unmap_return(page, false);
if self.owned {
//TODO: make sure this frame can be safely freed, physical use counter
crate::memory::deallocate_frames(frame, 1);
}
flush_all.consume(result);
}
flush_all.flush();
if let Some(desc) = self.desc_opt.take() {
println!("Grant::unmap: close desc {:?}", desc);
//TODO: This imposes a large cost on unmapping, but that cost cannot be avoided without modifying fmap and funmap
let _ = desc.close();
}
self.mapped = false;
}
pub fn unmap_inactive(mut self, new_table: &mut InactivePageTable) {
assert!(self.mapped);
let start_page = Page::containing_address(self.start_address());
let end_page = Page::containing_address(self.final_address());
for page in Page::range_inclusive(start_page, end_page) {
let (result, frame) = new_table.mapper().unmap_return(page, false);
if self.owned {
//TODO: make sure this frame can be safely freed, physical use counter
crate::memory::deallocate_frames(frame, 1);
}
// This is not the active table, so the flush can be ignored
unsafe { result.ignore(); }
}
ipi(IpiKind::Tlb, IpiTarget::Other);
if let Some(desc) = self.desc_opt.take() {
println!("Grant::unmap_inactive: close desc {:?}", desc);
//TODO: This imposes a large cost on unmapping, but that cost cannot be avoided without modifying fmap and funmap
let _ = desc.close();
}
self.mapped = false;
}
/// Extract out a region into a separate grant. The return value is as
/// follows: (before, new split, after). Before and after may be `None`,
/// which occurs when the split off region is at the start or end of the
/// page respectively.
///
/// # Panics
///
/// Panics if the start or end addresses of the region is not aligned to the
/// page size. To round up the size to the nearest page size, use `.round()`
/// on the region.
///
/// Also panics if the given region isn't completely contained within the
/// grant. Use `grant.intersect` to find a sub-region that works.
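    ///
    /// Illustrative outcomes: extracting the middle page of a three-page
    /// grant yields `(Some(first page), middle page, Some(last page))`;
    /// extracting the leading page yields `(None, first page, Some(rest))`.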
pub fn extract(mut self, region: Region) -> Option<(Option<Grant>, Grant, Option<Grant>)> {
assert_eq!(region.start_address().data() % PAGE_SIZE, 0, "split_out must be called on page-size aligned start address");
assert_eq!(region.size() % PAGE_SIZE, 0, "split_out must be called on page-size aligned end address");
let before_grant = self.before(region).map(|region| Grant {
region,
flags: self.flags,
mapped: self.mapped,
owned: self.owned,
desc_opt: self.desc_opt.clone(),
});
let after_grant = self.after(region).map(|region| Grant {
region,
flags: self.flags,
mapped: self.mapped,
owned: self.owned,
desc_opt: self.desc_opt.clone(),
});
unsafe {
*self.region_mut() = region;
}
Some((before_grant, self, after_grant))
}
}
impl Deref for Grant {
type Target = Region;
fn deref(&self) -> &Self::Target {
&self.region
}
}
impl PartialOrd for Grant {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.region.partial_cmp(&other.region)
}
}
impl Ord for Grant {
fn cmp(&self, other: &Self) -> Ordering {
self.region.cmp(&other.region)
}
}
impl PartialEq for Grant {
fn eq(&self, other: &Self) -> bool {
self.region.eq(&other.region)
}
}
impl Eq for Grant {}
impl Borrow<Region> for Grant {
fn borrow(&self) -> &Region {
&self.region
}
}
impl Drop for Grant {
fn drop(&mut self) {
assert!(!self.mapped, "Grant dropped while still mapped");
}
}
#[derive(Clone, Debug)]
pub enum SharedMemory {
Owned(Arc<Mutex<Memory>>),
Borrowed(Weak<Mutex<Memory>>)
}
impl SharedMemory {
pub fn with<F, T>(&self, f: F) -> T where F: FnOnce(&mut Memory) -> T {
match *self {
SharedMemory::Owned(ref memory_lock) => {
let mut memory = memory_lock.lock();
f(&mut *memory)
},
SharedMemory::Borrowed(ref memory_weak) => {
let memory_lock = memory_weak.upgrade().expect("SharedMemory::Borrowed no longer valid");
let mut memory = memory_lock.lock();
f(&mut *memory)
}
}
}
pub fn borrow(&self) -> SharedMemory {
match *self {
SharedMemory::Owned(ref memory_lock) => SharedMemory::Borrowed(Arc::downgrade(memory_lock)),
SharedMemory::Borrowed(ref memory_lock) => SharedMemory::Borrowed(memory_lock.clone())
}
}
}
#[derive(Debug)]
pub struct Memory {
start: VirtualAddress,
size: usize,
flags: PageFlags<RmmA>,
}
impl Memory {
pub fn new(start: VirtualAddress, size: usize, flags: PageFlags<RmmA>, clear: bool) -> Self {
let mut memory = Memory {
start,
size,
flags,
};
memory.map(clear);
memory
}
pub fn to_shared(self) -> SharedMemory {
SharedMemory::Owned(Arc::new(Mutex::new(self)))
}
pub fn start_address(&self) -> VirtualAddress {
self.start
}
pub fn size(&self) -> usize {
self.size
}
pub fn flags(&self) -> PageFlags<RmmA> {
self.flags
}
pub fn pages(&self) -> PageIter {
let start_page = Page::containing_address(self.start);
let end_page = Page::containing_address(VirtualAddress::new(self.start.data() + self.size - 1));
Page::range_inclusive(start_page, end_page)
}
fn map(&mut self, clear: bool) {
let mut active_table = unsafe { ActivePageTable::new(self.start.kind()) };
let flush_all = PageFlushAll::new();
for page in self.pages() {
let result = active_table
.map(page, self.flags)
.expect("TODO: handle ENOMEM in Memory::map");
flush_all.consume(result);
}
flush_all.flush();
if clear {
assert!(self.flags.has_write());
unsafe {
intrinsics::write_bytes(self.start_address().data() as *mut u8, 0, self.size);
}
}
}
fn unmap(&mut self) {
let mut active_table = unsafe { ActivePageTable::new(self.start.kind()) };
let flush_all = PageFlushAll::new();
for page in self.pages() {
let result = active_table.unmap(page);
flush_all.consume(result);
}
flush_all.flush();
}
/// A complicated operation to move a piece of memory to a new page table
/// It also allows for changing the address at the same time
pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable) {
let mut inactive_mapper = new_table.mapper();
let mut active_table = unsafe { ActivePageTable::new(new_start.kind()) };
let flush_all = PageFlushAll::new();
for page in self.pages() {
let (result, frame) = active_table.unmap_return(page, false);
flush_all.consume(result);
let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.start.data() + new_start.data()));
let result = inactive_mapper.map_to(new_page, frame, self.flags);
// This is not the active table, so the flush can be ignored
unsafe { result.ignore(); }
}<|fim▁hole|>
flush_all.flush();
self.start = new_start;
}
pub fn remap(&mut self, new_flags: PageFlags<RmmA>) {
let mut active_table = unsafe { ActivePageTable::new(self.start.kind()) };
let flush_all = PageFlushAll::new();
for page in self.pages() {
let result = active_table.remap(page, new_flags);
flush_all.consume(result);
}
flush_all.flush();
self.flags = new_flags;
}
pub fn resize(&mut self, new_size: usize, clear: bool) {
let mut active_table = unsafe { ActivePageTable::new(self.start.kind()) };
//TODO: Calculate page changes to minimize operations
if new_size > self.size {
let flush_all = PageFlushAll::new();
let start_page = Page::containing_address(VirtualAddress::new(self.start.data() + self.size));
let end_page = Page::containing_address(VirtualAddress::new(self.start.data() + new_size - 1));
for page in Page::range_inclusive(start_page, end_page) {
if active_table.translate_page(page).is_none() {
let result = active_table
.map(page, self.flags)
.expect("TODO: Handle OOM in Memory::resize");
flush_all.consume(result);
}
}
flush_all.flush();
if clear {
unsafe {
intrinsics::write_bytes((self.start.data() + self.size) as *mut u8, 0, new_size - self.size);
}
}
} else if new_size < self.size {
let flush_all = PageFlushAll::new();
let start_page = Page::containing_address(VirtualAddress::new(self.start.data() + new_size));
let end_page = Page::containing_address(VirtualAddress::new(self.start.data() + self.size - 1));
for page in Page::range_inclusive(start_page, end_page) {
if active_table.translate_page(page).is_some() {
let result = active_table.unmap(page);
flush_all.consume(result);
}
}
flush_all.flush();
}
self.size = new_size;
}
}
impl Drop for Memory {
fn drop(&mut self) {
self.unmap();
}
}
pub const DANGLING: usize = 1 << (usize::BITS - 2);
#[cfg(tests)]
mod tests {
// TODO: Get these tests working
#[test]
fn region_collides() {
assert!(Region::new(0, 2).collides(Region::new(0, 1)));
assert!(Region::new(0, 2).collides(Region::new(1, 1)));
assert!(!Region::new(0, 2).collides(Region::new(2, 1)));
assert!(!Region::new(0, 2).collides(Region::new(3, 1)));
}
}<|fim▁end|> | |
<|file_name|>net.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use io;
use libc::consts::os::extra::INVALID_SOCKET;
use libc::{self, c_int, c_void};
use mem;
use net::SocketAddr;
#[allow(deprecated)]
use num::{SignedInt, Int};
use rt;
use sync::{Once, ONCE_INIT};
use sys::c;
use sys_common::{AsInner, FromInner};
pub type wrlen_t = i32;
pub struct Socket(libc::SOCKET);
/// Checks whether the Windows socket interface has been started already, and
/// if not, starts it.
pub fn init() {
static START: Once = ONCE_INIT;
START.call_once(|| unsafe {
let mut data: c::WSADATA = mem::zeroed();
let ret = c::WSAStartup(0x202, // version 2.2
&mut data);
assert_eq!(ret, 0);
let _ = rt::at_exit(|| { c::WSACleanup(); });
});
}
/// Returns the last error from the Windows socket interface.
fn last_error() -> io::Error {
io::Error::from_raw_os_error(unsafe { c::WSAGetLastError() })
}
/// Checks if the signed integer is the Windows constant `SOCKET_ERROR` (-1)
/// and if so, returns the last error from the Windows socket interface. This
/// function must be called before another call to the socket API is made.
///
/// FIXME: generics needed?
#[allow(deprecated)]
pub fn cvt<T: SignedInt>(t: T) -> io::Result<T> {
let one: T = Int::one();
if t == -one {
Err(last_error())
} else {
Ok(t)
}
}
/// Provides the functionality of `cvt` for the return values of `getaddrinfo`
/// and similar, meaning that they return an error if the return value is 0.
pub fn cvt_gai(err: c_int) -> io::Result<()> {
if err == 0 { return Ok(()) }
cvt(err).map(|_| ())
}
/// Provides the functionality of `cvt` for a closure.
#[allow(deprecated)]
pub fn cvt_r<T: SignedInt, F>(mut f: F) -> io::Result<T> where F: FnMut() -> T {
cvt(f())
}
impl Socket {
pub fn new(addr: &SocketAddr, ty: c_int) -> io::Result<Socket> {
let fam = match *addr {
SocketAddr::V4(..) => libc::AF_INET,
SocketAddr::V6(..) => libc::AF_INET6,
};
match unsafe { libc::socket(fam, ty, 0) } {
INVALID_SOCKET => Err(last_error()),
n => Ok(Socket(n)),
}
}
pub fn accept(&self, storage: *mut libc::sockaddr,
len: *mut libc::socklen_t) -> io::Result<Socket> {
match unsafe { libc::accept(self.0, storage, len) } {
INVALID_SOCKET => Err(last_error()),
n => Ok(Socket(n)),
}
}
pub fn duplicate(&self) -> io::Result<Socket> {
unsafe {
let mut info: c::WSAPROTOCOL_INFO = mem::zeroed();
try!(cvt(c::WSADuplicateSocketW(self.0,
c::GetCurrentProcessId(),
&mut info)));
match c::WSASocketW(info.iAddressFamily,
info.iSocketType,
info.iProtocol,
&mut info, 0, 0) {
INVALID_SOCKET => Err(last_error()),
n => Ok(Socket(n)),
}
}
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
// On unix when a socket is shut down all further reads return 0, so we
// do the same on windows to map a shut down socket to returning EOF.
unsafe {
match libc::recv(self.0, buf.as_mut_ptr() as *mut c_void,
buf.len() as i32, 0) {
-1 if c::WSAGetLastError() == c::WSAESHUTDOWN => Ok(0),
-1 => Err(last_error()),
n => Ok(n as usize)
}
}
}
}
impl Drop for Socket {
fn drop(&mut self) {
let _ = unsafe { libc::closesocket(self.0) };
}<|fim▁hole|>}
impl AsInner<libc::SOCKET> for Socket {
fn as_inner(&self) -> &libc::SOCKET { &self.0 }
}
impl FromInner<libc::SOCKET> for Socket {
fn from_inner(sock: libc::SOCKET) -> Socket { Socket(sock) }
}<|fim▁end|> | |
<|file_name|>myserver.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
<|fim▁hole|> s = socket.socket()
host = socket.gethostname()
port = 12345
s.bind((host, port))
s.listen(5)
while True:
c, addr = s.accept()
print c
print 'connect addr: ', addr
c.send('Welcome to CaiNiao!')
if cmp(c.recv(1024), "GoodBye") == 0:
break
c.close()
s.close()<|fim▁end|> | import socket
def server_test(): |
<|file_name|>DIAMOND_results_filter.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python2.7
##########################################################################
#
# Copyright (C) 2015-2016 Sam Westreich
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation;
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#<|fim▁hole|># DIAMOND_results_filter.py
# Created 1/30/17, this version updated 5/22/17
# Sam Westreich, [email protected], github.com/transcript
#
# Purpose: This takes a DIAMOND outfile and the RefSeq database and pulls
# out hits to any specific organism, identifying the raw input reads that
# were mapped to that organism.
# Usage:
#
# -I infile specifies the infile (a DIAMOND results file
# in m8 format)
# -SO specific target the organism search term, either genus,
# species, or function.
# -D database file specifies a reference database to search
# against for results
# -O outfile name optional; changes the default outfile name
#
##########################################################################
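# Example invocation (illustrative file names only):
#   python DIAMOND_results_filter.py -I sample_annotated.m8 -SO "Escherichia" \
#       -D RefSeq_db.fa -O Escherichia_hits.tsv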
# imports
import operator, sys, time, gzip, re
# String searching function:
def string_find(usage_term):
for idx, elem in enumerate(sys.argv):
this_elem = elem
next_elem = sys.argv[(idx + 1) % len(sys.argv)]
if elem == usage_term:
return next_elem
# loading starting file
if "-I" in sys.argv:
infile_name = string_find("-I")
else:
sys.exit ("WARNING: infile must be specified using '-I' flag.")
# optional outfile of specific organism results
if "-SO" in sys.argv:
target_org = string_find("-SO")
if '"' in target_org:
for idx, elem in enumerate(sys.argv):
this_elem = elem
next_elem = sys.argv[(idx + 1) % len(sys.argv)]
second_elem = sys.argv[(idx + 2) % len(sys.argv)]
if elem == "-SO":
target_org = next_elem + " " + second_elem
if "-O" in sys.argv:
target_org_outfile = open(string_find("-O"), "w")
else:
target_org_outfile = open(infile_name[:-4] + "_" + target_org + ".tsv", "w")
else:
sys.exit("Need to specify target organism or function to filter by, using -SO flag.")
# loading database file
if "-D" in sys.argv:
db = open(string_find("-D"), "r")
else:
sys.exit("WARNING: database must be specified using '-D' flag.")
# Getting the database assembled
db_org_dictionary = {}
db_id_dictionary = {}
db_line_counter = 0
db_error_counter = 0
t0 = time.time()
for line in db:
if line.startswith(">") == True:
db_line_counter += 1
# line counter to show progress
if db_line_counter % 1000000 == 0: # each million
t95 = time.time()
print (str(db_line_counter) + " lines processed so far in " + str(t95-t0) + " seconds.")
if target_org in line:
splitline = line.split(" ")
# ID, the hit returned in DIAMOND results
db_id = str(splitline[0])[1:].split(" ")[0]
# name and functional description
db_entry = line.split("[", 1)
db_entry = db_entry[0].split(" ", 1)
db_entry = db_entry[1][:-1]
# organism name
if line.count("[") != 1:
splitline = line.split("[")
db_org = splitline[line.count("[")].strip()[:-1]
if db_org[0].isdigit():
split_db_org = db_org.split()
try:
db_org = split_db_org[1] + " " + split_db_org[2]
except IndexError:
try:
db_org = split_db_org[1]
except IndexError:
db_org = splitline[line.count("[")-1]
if db_org[0].isdigit():
split_db_org = db_org.split()
db_org = split_db_org[1] + " " + split_db_org[2]
else:
db_org = line.split("[", 1)
db_org = db_org[1].split()
try:
db_org = str(db_org[0]) + " " + str(db_org[1])
except IndexError:
db_org = line.strip().split("[", 1)
db_org = db_org[1][:-1]
db_error_counter += 1
db_org = re.sub('[^a-zA-Z0-9-_*. ]', '', db_org)
# add to dictionaries
db_org_dictionary[db_id] = db_org
db_id_dictionary[db_id] = db_entry
db.close()
print ("Database is read and set up, moving on to the infile...")
infile = open (infile_name, "r")
# setting up databases
RefSeq_hit_count_db = {}
unique_seq_db = {}
line_counter = 0
hit_counter = 0
t1 = time.time()
# reading through the infile
for line in infile:
line_counter += 1
splitline = line.split("\t")
try:
target_org_outfile.write(splitline[0] + "\t" + splitline[1] + "\t" + db_org_dictionary[splitline[1]] + "\t" + db_id_dictionary[splitline[1]] + "\n")
hit_counter += 1
except KeyError:
continue
if line_counter % 1000000 == 0:
t99 = time.time()
print (str(line_counter)[:-6] + "M lines processed so far in " + str(t99-t1) + " seconds.")
# results stats
t100 = time.time()
print ("Run complete!")
print ("Number of sequences found matching target query, " + target_org + ":\t" + str(hit_counter))
print ("Time elapsed: " + str(t100-t0) + " seconds.")
infile.close()
target_org_outfile.close()<|fim▁end|> | ##########################################################################
# |
<|file_name|>latestupload.py<|end_file_name|><|fim▁begin|>import sys
from pymongo import MongoClient
# Connecting to the mongo client<|fim▁hole|>collection = db['userDB']
userEmail = sys.argv[1]
# find_one() returns the matching document (or None); find() would return a
# cursor, which cannot be indexed by a field name.
result = collection.find_one({'email': userEmail})
if result is None:
    exit(1)
pIDs = result.get('personIDs', [])
if len(pIDs)==0:
exit(1)
print(pIDs.pop())
exit(0)<|fim▁end|> | client = MongoClient('localhost',27017)
# Connecting to the database
db = client['rescueHomeless']
# Connecting to the required collection |
<|file_name|>curly.ts<|end_file_name|><|fim▁begin|>import { privatize as P } from '@ember/-internals/container';
import { getOwner } from '@ember/-internals/owner';
import { guidFor } from '@ember/-internals/utils';
import {
addChildView,
OwnedTemplateMeta,
setElementView,
setViewElement,
} from '@ember/-internals/views';
import { assert, debugFreeze } from '@ember/debug';
import { _instrumentStart } from '@ember/instrumentation';
import { assign } from '@ember/polyfills';
import { DEBUG } from '@glimmer/env';
import {
ComponentCapabilities,
Dict,
Option,
ProgramSymbolTable,
Simple,
VMHandle,
} from '@glimmer/interfaces';
import { combine, Tag, validate, value, VersionedPathReference } from '@glimmer/reference';
import {
Arguments,
Bounds,
ComponentDefinition,
ElementOperations,
Invocation,
PreparedArguments,
PrimitiveReference,
WithDynamicLayout,
WithDynamicTagName,
WithStaticLayout,
} from '@glimmer/runtime';
import { Destroyable, EMPTY_ARRAY } from '@glimmer/util';
import { BOUNDS, DIRTY_TAG, HAS_BLOCK, IS_DISPATCHING_ATTRS, ROOT_REF } from '../component';
import Environment from '../environment';
import { DynamicScope } from '../renderer';
import RuntimeResolver from '../resolver';
import { Factory as TemplateFactory, isTemplateFactory, OwnedTemplate } from '../template';
import {
AttributeBinding,
ClassNameBinding,
IsVisibleBinding,
referenceForKey,
SimpleClassNameBindingReference,
} from '../utils/bindings';
import ComponentStateBucket, { Component } from '../utils/curly-component-state-bucket';
import { processComponentArgs } from '../utils/process-args';
import AbstractManager from './abstract';
import DefinitionState from './definition-state';
function aliasIdToElementId(args: Arguments, props: any) {
if (args.named.has('id')) {
// tslint:disable-next-line:max-line-length
assert(
`You cannot invoke a component with both 'id' and 'elementId' at the same time.`,
!args.named.has('elementId')<|fim▁hole|>}
// We must traverse the attributeBindings in reverse keeping track of
// what has already been applied. This is essentially refining the concatenated
// properties applying right to left.
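// Illustrative: with attributeBindings = ['title', 'foo:title'], the reverse
// walk installs 'foo:title' onto the `title` attribute first, so the earlier
// plain 'title' entry is skipped as already seen.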
function applyAttributeBindings(
element: Simple.Element,
attributeBindings: Array<string>,
component: Component,
operations: ElementOperations
) {
let seen: string[] = [];
let i = attributeBindings.length - 1;
while (i !== -1) {
let binding = attributeBindings[i];
let parsed: [string, string, boolean] = AttributeBinding.parse(binding);
let attribute = parsed[1];
if (seen.indexOf(attribute) === -1) {
seen.push(attribute);
AttributeBinding.install(element, component, parsed, operations);
}
i--;
}
if (seen.indexOf('id') === -1) {
let id = component.elementId ? component.elementId : guidFor(component);
operations.setAttribute('id', PrimitiveReference.create(id), false, null);
}
if (seen.indexOf('style') === -1) {
IsVisibleBinding.install(element, component, operations);
}
}
const DEFAULT_LAYOUT = P`template:components/-default`;
const EMPTY_POSITIONAL_ARGS: VersionedPathReference[] = [];
debugFreeze(EMPTY_POSITIONAL_ARGS);
export default class CurlyComponentManager
extends AbstractManager<ComponentStateBucket, DefinitionState>
implements
WithStaticLayout<ComponentStateBucket, DefinitionState, OwnedTemplateMeta, RuntimeResolver>,
WithDynamicTagName<ComponentStateBucket>,
WithDynamicLayout<ComponentStateBucket, OwnedTemplateMeta, RuntimeResolver> {
getLayout(state: DefinitionState, _resolver: RuntimeResolver): Invocation {
return {
// TODO fix
handle: (state.handle as any) as number,
symbolTable: state.symbolTable!,
};
}
protected templateFor(component: Component): OwnedTemplate {
let { layout, layoutName } = component;
let owner = getOwner(component);
let factory: TemplateFactory;
if (layout === undefined) {
if (layoutName !== undefined) {
let _factory = owner.lookup<TemplateFactory>(`template:${layoutName}`);
assert(`Layout \`${layoutName}\` not found!`, _factory !== undefined);
factory = _factory!;
} else {
factory = owner.lookup<TemplateFactory>(DEFAULT_LAYOUT)!;
}
} else if (isTemplateFactory(layout)) {
factory = layout;
} else {
// we were provided an instance already
return layout;
}
return factory(owner);
}
getDynamicLayout({ component }: ComponentStateBucket): Invocation {
let template = this.templateFor(component);
let layout = template.asWrappedLayout();
return {
handle: layout.compile(),
symbolTable: layout.symbolTable,
};
}
getTagName(state: ComponentStateBucket): Option<string> {
let { component, hasWrappedElement } = state;
if (!hasWrappedElement) {
return null;
}
return (component && component.tagName) || 'div';
}
getCapabilities(state: DefinitionState) {
return state.capabilities;
}
prepareArgs(state: DefinitionState, args: Arguments): Option<PreparedArguments> {
if (args.named.has('__ARGS__')) {
let __args__ = args.named.get('__ARGS__').value() as Dict<VersionedPathReference>;
let prepared = {
positional: EMPTY_POSITIONAL_ARGS,
named: {
...args.named.capture().map,
...__args__,
},
};
if (DEBUG) {
delete prepared.named.__ARGS__;
}
return prepared;
}
const { positionalParams } = state.ComponentClass.class!;
// early exits
if (
positionalParams === undefined ||
positionalParams === null ||
args.positional.length === 0
) {
return null;
}
let named: PreparedArguments['named'];
if (typeof positionalParams === 'string') {
assert(
`You cannot specify positional parameters and the hash argument \`${positionalParams}\`.`,
!args.named.has(positionalParams)
);
named = { [positionalParams]: args.positional.capture() };
assign(named, args.named.capture().map);
} else if (Array.isArray(positionalParams) && positionalParams.length > 0) {
const count = Math.min(positionalParams.length, args.positional.length);
named = {};
assign(named, args.named.capture().map);
for (let i = 0; i < count; i++) {
const name = positionalParams[i];
assert(
`You cannot specify both a positional param (at position ${i}) and the hash argument \`${name}\`.`,
!args.named.has(name)
);
named[name] = args.positional.at(i);
}
} else {
return null;
}
return { positional: EMPTY_ARRAY, named };
}
/*
* This hook is responsible for actually instantiating the component instance.
* It also is where we perform additional bookkeeping to support legacy
* features like exposed by view mixins like ChildViewSupport, ActionSupport,
* etc.
*/
create(
environment: Environment,
state: DefinitionState,
args: Arguments,
dynamicScope: DynamicScope,
callerSelfRef: VersionedPathReference,
hasBlock: boolean
): ComponentStateBucket {
if (DEBUG) {
this._pushToDebugStack(`component:${state.name}`, environment);
}
// Get the nearest concrete component instance from the scope. "Virtual"
// components will be skipped.
let parentView = dynamicScope.view;
// Get the Ember.Component subclass to instantiate for this component.
let factory = state.ComponentClass;
// Capture the arguments, which tells Glimmer to give us our own, stable
// copy of the Arguments object that is safe to hold on to between renders.
let capturedArgs = args.named.capture();
let props = processComponentArgs(capturedArgs);
// Alias `id` argument to `elementId` property on the component instance.
aliasIdToElementId(args, props);
// Set component instance's parentView property to point to nearest concrete
// component.
props.parentView = parentView;
// Set whether this component was invoked with a block
// (`{{#my-component}}{{/my-component}}`) or without one
// (`{{my-component}}`).
props[HAS_BLOCK] = hasBlock;
// Save the current `this` context of the template as the component's
// `_target`, so bubbled actions are routed to the right place.
props._target = callerSelfRef.value();
// static layout asserts CurriedDefinition
if (state.template) {
props.layout = state.template;
}
// Now that we've built up all of the properties to set on the component instance,
// actually create it.
let component = factory.create(props);
let finalizer = _instrumentStart('render.component', initialRenderInstrumentDetails, component);
// We become the new parentView for downstream components, so save our
// component off on the dynamic scope.
dynamicScope.view = component;
// Unless we're the root component, we need to add ourselves to our parent
// component's childViews array.
if (parentView !== null && parentView !== undefined) {
addChildView(parentView, component);
}
component.trigger('didReceiveAttrs');
let hasWrappedElement = component.tagName !== '';
    // We usually do this in the `didCreateElement` hook, but that hook doesn't fire for tagless components
if (!hasWrappedElement) {
if (environment.isInteractive) {
component.trigger('willRender');
}
component._transitionTo('hasElement');
if (environment.isInteractive) {
component.trigger('willInsertElement');
}
}
// Track additional lifecycle metadata about this component in a state bucket.
// Essentially we're saving off all the state we'll need in the future.
let bucket = new ComponentStateBucket(
environment,
component,
capturedArgs,
finalizer,
hasWrappedElement
);
if (args.named.has('class')) {
bucket.classRef = args.named.get('class');
}
if (DEBUG) {
processComponentInitializationAssertions(component, props);
}
if (environment.isInteractive && hasWrappedElement) {
component.trigger('willRender');
}
return bucket;
}
getSelf({ component }: ComponentStateBucket): VersionedPathReference {
return component[ROOT_REF];
}
didCreateElement(
{ component, classRef, environment }: ComponentStateBucket,
element: Simple.Element,
operations: ElementOperations
): void {
setViewElement(component, element);
setElementView(element, component);
let { attributeBindings, classNames, classNameBindings } = component;
if (attributeBindings && attributeBindings.length) {
applyAttributeBindings(element, attributeBindings, component, operations);
} else {
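      // No attribute bindings: still reflect `elementId` (or a guid) as the
      // `id` attribute and keep `isVisible` handling working.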
let id = component.elementId ? component.elementId : guidFor(component);
operations.setAttribute('id', PrimitiveReference.create(id), false, null);
IsVisibleBinding.install(element, component, operations);
}
if (classRef) {
const ref = new SimpleClassNameBindingReference(classRef, classRef['propertyKey']);
operations.setAttribute('class', ref, false, null);
}
if (classNames && classNames.length) {
classNames.forEach((name: string) => {
operations.setAttribute('class', PrimitiveReference.create(name), false, null);
});
}
if (classNameBindings && classNameBindings.length) {
classNameBindings.forEach((binding: string) => {
ClassNameBinding.install(element, component, binding, operations);
});
}
operations.setAttribute('class', PrimitiveReference.create('ember-view'), false, null);
if ('ariaRole' in component) {
operations.setAttribute('role', referenceForKey(component, 'ariaRole'), false, null);
}
component._transitionTo('hasElement');
if (environment.isInteractive) {
component.trigger('willInsertElement');
}
}
didRenderLayout(bucket: ComponentStateBucket, bounds: Bounds): void {
bucket.component[BOUNDS] = bounds;
bucket.finalize();
if (DEBUG) {
this.debugStack.pop();
}
}
getTag({ args, component }: ComponentStateBucket): Tag {
return args ? combine([args.tag, component[DIRTY_TAG]]) : component[DIRTY_TAG];
}
didCreate({ component, environment }: ComponentStateBucket): void {
if (environment.isInteractive) {
component._transitionTo('inDOM');
component.trigger('didInsertElement');
component.trigger('didRender');
}
}
update(bucket: ComponentStateBucket): void {
let { component, args, argsRevision, environment } = bucket;
if (DEBUG) {
this._pushToDebugStack(component._debugContainerKey, environment);
}
bucket.finalizer = _instrumentStart('render.component', rerenderInstrumentDetails, component);
if (args && !validate(args.tag, argsRevision)) {
let props = processComponentArgs(args!);
bucket.argsRevision = value(args!.tag);
component[IS_DISPATCHING_ATTRS] = true;
component.setProperties(props);
component[IS_DISPATCHING_ATTRS] = false;
component.trigger('didUpdateAttrs');
component.trigger('didReceiveAttrs');
}
if (environment.isInteractive) {
component.trigger('willUpdate');
component.trigger('willRender');
}
}
didUpdateLayout(bucket: ComponentStateBucket): void {
bucket.finalize();
if (DEBUG) {
this.debugStack.pop();
}
}
didUpdate({ component, environment }: ComponentStateBucket): void {
if (environment.isInteractive) {
component.trigger('didUpdate');
component.trigger('didRender');
}
}
getDestructor(stateBucket: ComponentStateBucket): Option<Destroyable> {
return stateBucket;
}
}
export function validatePositionalParameters(
named: { has(name: string): boolean },
positional: { length: number },
positionalParamsDefinition: any
) {
if (DEBUG) {
if (!named || !positional || !positional.length) {
return;
}
let paramType = typeof positionalParamsDefinition;
if (paramType === 'string') {
// tslint:disable-next-line:max-line-length
assert(
`You cannot specify positional parameters and the hash argument \`${positionalParamsDefinition}\`.`,
!named.has(positionalParamsDefinition)
);
} else {
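      // Array form: only validate the names that actually received a
      // positional value.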
if (positional.length < positionalParamsDefinition.length) {
positionalParamsDefinition = positionalParamsDefinition.slice(0, positional.length);
}
for (let i = 0; i < positionalParamsDefinition.length; i++) {
let name = positionalParamsDefinition[i];
assert(
`You cannot specify both a positional param (at position ${i}) and the hash argument \`${name}\`.`,
!named.has(name)
);
}
}
}
}
export function processComponentInitializationAssertions(component: Component, props: any) {
assert(
`classNameBindings must be non-empty strings: ${component}`,
(() => {
let { classNameBindings } = component;
for (let i = 0; i < classNameBindings.length; i++) {
let binding = classNameBindings[i];
if (typeof binding !== 'string' || binding.length === 0) {
return false;
}
}
return true;
})()
);
assert(
`classNameBindings must not have spaces in them: ${component}`,
(() => {
let { classNameBindings } = component;
for (let i = 0; i < classNameBindings.length; i++) {
let binding = classNameBindings[i];
if (binding.split(' ').length > 1) {
return false;
}
}
return true;
})()
);
assert(
`You cannot use \`classNameBindings\` on a tag-less component: ${component}`,
component.tagName !== '' ||
!component.classNameBindings ||
component.classNameBindings.length === 0
);
assert(
`You cannot use \`elementId\` on a tag-less component: ${component}`,
component.tagName !== '' ||
props.id === component.elementId ||
(!component.elementId && component.elementId !== '')
);
assert(
`You cannot use \`attributeBindings\` on a tag-less component: ${component}`,
component.tagName !== '' ||
!component.attributeBindings ||
component.attributeBindings.length === 0
);
}
export function initialRenderInstrumentDetails(component: any): any {
return component.instrumentDetails({ initialRender: true });
}
export function rerenderInstrumentDetails(component: any): any {
return component.instrumentDetails({ initialRender: false });
}
// This is not any of glimmer-vm's proper Argument types because we
// don't have sufficient public constructors to conveniently
// reassemble one after we mangle the various arguments.
interface CurriedArgs {
positional: any[];
named: any;
}
export const CURLY_CAPABILITIES: ComponentCapabilities = {
dynamicLayout: true,
dynamicTag: true,
prepareArgs: true,
createArgs: true,
attributeHook: true,
elementHook: true,
createCaller: true,
dynamicScope: true,
updateHook: true,
createInstance: true,
};
const CURLY_COMPONENT_MANAGER = new CurlyComponentManager();
export class CurlyComponentDefinition implements ComponentDefinition {
public args: CurriedArgs | undefined;
public state: DefinitionState;
public symbolTable: ProgramSymbolTable | undefined;
public manager: CurlyComponentManager = CURLY_COMPONENT_MANAGER;
// tslint:disable-next-line:no-shadowed-variable
constructor(
public name: string,
public ComponentClass: any,
public handle: Option<VMHandle>,
public template: Option<OwnedTemplate>,
args?: CurriedArgs
) {
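    // Derive the symbol table from the layout once so it can be shared by
    // both the definition and its DefinitionState.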
const layout = template && template.asLayout();
const symbolTable = layout ? layout.symbolTable : undefined;
this.symbolTable = symbolTable;
this.template = template;
this.args = args;
this.state = {
name,
ComponentClass,
handle,
template,
capabilities: CURLY_CAPABILITIES,
symbolTable,
};
}
}<|fim▁end|> | );
props.elementId = props.id;
} |
<|file_name|>omd_livestatus.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
u"""
OMD Livestatus dynamic inventory script
=======================================
If running as an OMD site user, i.e. if ${OMD_ROOT} is set, we try to
connect to the Livestatus socket at the default location
${OMD_ROOT}/tmp/run/live
Alternatively, the path to the Livestatus socket can be set from the
environment via
export OMD_LIVESTATUS_SOCKET=/omd/sites/mysite/tmp/run/live
or on the command-line with --socket.
Inspired by the DigitalOcean inventory script:
https://github.com/ansible/ansible/blob/devel/contrib/inventory/digital_ocean.py
:author: Andreas Härpfer <[email protected]>
"""
from __future__ import print_function
__version__ = '0.2'
import datetime
import os
import sys
import optparse # Legacy ... 2.6 still out there
import socket
import subprocess
try:
import json
except ImportError:
import simplejson as json
try:
maketrans = str.maketrans # Python 3
except AttributeError:
from string import maketrans # Python 2
class OMDLivestatusInventory(object):
#: default socket path
_def_socket_path = u'/tmp/run/live'
#: Livestatus query string
_def_host_query = (u'GET hosts\n'
'Columns: address name alias groups host_custom_variables\n'
'OutputFormat: json\n')
#: string of bad characters in host or group names
_bad_chars = u'.,;:[]/ '
#: replacement char for bad chars
_replacement_char = u'_'
def __init__(self, location=None, method='socket', by_ip=False):
self.data = {}
self.inventory = {}
self.method = method
#: translation table for sanitizing group names
#
# See the following to find out why this can't be a class variable:
# http://stackoverflow.com/questions/13905741/accessing-class-variables-from-a-list-comprehension-in-the-class-definition
# This version only works for byte strings but not for unicode :-(
#self._trans_table = maketrans(
# self._bad_chars, self._replacement_char * len(_bad_chars))
# Unicode version; see also:
# http://stackoverflow.com/questions/1324067/how-do-i-get-str-translate-to-work-with-unicode-strings
self._trans_table = dict((ord(char), self._replacement_char)
for char in self._bad_chars)
if not location:
if 'OMD_LIVESTATUS_SOCKET' in os.environ:
self.location = os.environ['OMD_LIVESTATUS_SOCKET']
elif 'OMD_ROOT' in os.environ:
self.location = (os.environ['OMD_ROOT']
+ OMDLivestatusInventory._def_socket_path)
else:
raise EnvironmentError(
'Unable to determine location of Livestatus socket.')
else:
self.location = location
self.load_from_omd()
if by_ip:
self.build_inventory_by_ip()
else:
self.build_inventory_by_name()
def load_from_omd(self):
"""Read host data from livestatus socket.
Populates self.data['hosts'].
"""
self.data['hosts'] = []
if self.method == 'ssh':
answer = json.loads(self._read_from_ssh())
else:
answer = json.loads(self._read_from_socket())
for host in answer:
self.data['hosts'].append(
dict(zip((u'ip', u'name', u'alias', u'groups', u'custom_vars'),
host)))
def _read_from_socket(self):
"""Read data from local Livestatus socket."""
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(self.location)
s.send(OMDLivestatusInventory._def_host_query.encode('utf-8'))
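        # Half-close the write side so Livestatus sees EOF, knows the query
        # is complete, and starts sending the response.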
s.shutdown(socket.SHUT_WR)
return s.recv(100000000).decode('utf-8')
def _read_from_ssh(self):
"""Read data from remote Livestatus socket via SSH.
Assumes non-interactive (e.g. via ssh-agent) access to the
remote host. The `unixcat` command (part of Livestatus) has to
be available via $PATH at the remote end.
"""
l = self.location.split(':', 1)
l.append('.' + OMDLivestatusInventory._def_socket_path)
host, path = l[0], l[1]
cmd = ['ssh', host,
'-o', 'BatchMode=yes',
'-o', 'ConnectTimeout=10',
'unixcat {0}'.format(path)]
p = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate(
input=OMDLivestatusInventory._def_host_query.encode('utf-8'))
if p.returncode:
raise RuntimeError(err)
return out.decode('utf-8')
def build_inventory_by_ip(self):
"""Create Ansible inventory by IP address instead of by name.
        Caveat: contrary to hostnames, IP addresses are not guaranteed to
        be unique in OMD! Since there is only one set of hostvars for a
        given IP, duplicate IPs might mean that you are losing data.
When creating static inventory output we issue a warning for
duplicate IPs. For the default JSON output this warning is
suppressed since Ansible discards any output on STDERR.
Group names are sanitized to not contain characters that Ansible
can't digest. In particular group names in Ansible must not
contain blanks!
"""
inventory = {}
hostvars = {}
for host in self.data['hosts']:
for group in host['groups'] or [u'_NOGROUP']:
sanitized_group = group.translate(self._trans_table)
if sanitized_group in inventory:
inventory[sanitized_group].append(host['ip'])
else:
inventory[sanitized_group] = [host['ip']]
            # Detect duplicate IPs in inventory. Keep the first occurrence
            # in hostvars instead of overwriting with later data.
ip = host['ip']
if ip not in hostvars:
hostvars[ip] = {
'omd_name': host['name'],<|fim▁hole|> 'omd_alias': host['alias'],
'omd_custom_vars': host['custom_vars'],
}
#else:
# # duplicate IP
# pass
self.inventory = inventory
self.inventory['_meta'] = {
'hostvars': hostvars
}
def build_inventory_by_name(self):
"""Create Ansible inventory by OMD name.
Group names are sanitized to not contain characters that Ansible
can't digest. In particular group names in Ansible must not
contain blanks!
"""
inventory = {}
hostvars = {}
for host in self.data['hosts']:
for group in host['groups'] or [u'_NOGROUP']:
sanitized_group = group.translate(self._trans_table)
if sanitized_group in inventory:
inventory[sanitized_group].append(host['name'])
else:
inventory[sanitized_group] = [host['name']]
hostvars[host['name']] = {
'ansible_host': host['ip'],
'omd_alias': host['alias'],
'omd_custom_vars': host['custom_vars'],
}
self.inventory = inventory
self.inventory['_meta'] = {
'hostvars': hostvars
}
def list(self, indent=None, sort_keys=False):
"""Return full inventory data as JSON."""
return json.dumps(self.inventory, indent=indent, sort_keys=sort_keys)
def host(self, name, indent=None, sort_keys=False):
"""Return hostvars for a single host as JSON."""
if name in self.inventory['_meta']['hostvars']:
return(json.dumps(
self.inventory['_meta']['hostvars'][name],
indent=indent,
sort_keys=sort_keys
))
else:
return("{}")
def static(self):
"""Return data in static inventory format."""
out = []
out.append('# File created: {}'.format(datetime.datetime.now()))
for group in [k for k in self.inventory.keys() if k != '_meta']:
out.append('\n[{0}]'.format(group))
for host in self.inventory[group]:
vars = self.inventory['_meta']['hostvars'][host]
hostvars = []
for varname in vars.keys():
hostvars.append('{0}="{1}"'.format(varname, vars[varname]))
out.append('{0}\t{1}'.format(host, ' '.join(hostvars)))
return '\n'.join(out)
def _save_method(option, opt_str, value, parser):
parser.values.method = opt_str.lstrip('-')
parser.values.location = value
def parse_arguments():
"""Parse command line arguments."""
parser = optparse.OptionParser(version='%prog {0}'.format(__version__))
parser.set_defaults(method='socket')
output_group = optparse.OptionGroup(parser, 'Output formats')
output_group.add_option(
'--list', action='store_true', dest='list', default=False,
help='Return full Ansible inventory as JSON (default action).')
output_group.add_option(
'--host', type='string', dest='host', default=None,
help='Return Ansible hostvars for HOST as JSON.')
output_group.add_option(
'--static', action='store_true', dest='static', default=False,
help='Print inventory in static file format to stdout.')
output_group.add_option(
'--by-ip', action='store_true', dest='by_ip', default=False,
help='Create inventory by IP (instead of the default by name).')
parser.add_option_group(output_group)
connect_group = optparse.OptionGroup(parser, 'Connection options')
connect_group.add_option(
'--socket', type='string', dest='location', default=None,
action='callback', callback=_save_method,
help=('Set path to Livestatus socket. If omitted, try to use '
'$OMD_LIVESTATUS_SOCKET or $OMD_ROOT/tmp/run/live.'
))
connect_group.add_option(
'--ssh', type='string', dest='location', default=None,
action='callback', callback=_save_method,
help=('Connect to Livestatus socket via SSH. LOCATION has the '
'form [user@]host[:path], the default path is ./tmp/run/live.'
))
parser.add_option_group(connect_group)
opts, args = parser.parse_args()
# Make `list` the default action.
if not opts.host:
opts.list = True
return opts, args
if __name__ == '__main__':
opts, args = parse_arguments()
inv = OMDLivestatusInventory(opts.location,
method=opts.method,
by_ip=opts.by_ip)
if opts.static:
print(inv.static())
elif opts.list:
print(inv.list(indent=4, sort_keys=True))
elif opts.host:
print(inv.host(opts.host, indent=4, sort_keys=True))
else:
print('Missing command.')
sys.exit(1)<|fim▁end|> | |
<|file_name|>payload.test.ts<|end_file_name|><|fim▁begin|>import knex, { Knex } from 'knex';
import { MockClient, Tracker, getTracker } from 'knex-mock-client';
import { PayloadService } from '../../src/services';
jest.mock('../../src/database/index', () => {
return { getDatabaseClient: jest.fn().mockReturnValue('postgres') };
});
jest.requireMock('../../src/database/index');
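// The database module is mocked above, so PayloadService never opens a real
// connection; everything below runs against knex-mock-client.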
describe('Integration Tests', () => {
let db: jest.Mocked<Knex>;
let tracker: Tracker;
beforeAll(async () => {
db = knex({ client: MockClient }) as jest.Mocked<Knex>;
tracker = getTracker();
});
afterEach(() => {
tracker.reset();
});
describe('Services / PayloadService', () => {
describe('transformers', () => {
let service: PayloadService;
beforeEach(() => {
service = new PayloadService('test', {
knex: db,
schema: { collections: {}, relations: [] },
});
});
describe('csv', () => {
it('Returns undefined for illegal values', async () => {
const result = await service.transformers.csv({
value: 123,
action: 'read',
payload: {},
accountability: { role: null },
specials: [],
});
expect(result).toBe(undefined);
});
it('Returns [] for empty strings', async () => {
const result = await service.transformers.csv({
value: '',
action: 'read',
payload: {},
accountability: { role: null },
specials: [],
});
expect(result).toMatchObject([]);
});
it('Splits the CSV string', async () => {
const result = await service.transformers.csv({
value: 'test,directus',
action: 'read',
payload: {},
accountability: { role: null },
specials: [],
});
expect(result).toMatchObject(['test', 'directus']);
});
it('Saves array values as joined string', async () => {
const result = await service.transformers.csv({
value: ['test', 'directus'],
action: 'create',
payload: {},
accountability: { role: null },
specials: [],
});<|fim▁hole|> it('Saves string values as is', async () => {
const result = await service.transformers.csv({
value: 'test,directus',
action: 'create',
payload: {},
accountability: { role: null },
specials: [],
});
expect(result).toBe('test,directus');
});
});
});
});
});<|fim▁end|> |
expect(result).toBe('test,directus');
});
|
<|file_name|>test_multidb.py<|end_file_name|><|fim▁begin|>import unittest
from django.db import connection, migrations, models
from django.db.migrations.state import ProjectState
from django.test import override_settings
from .test_operations import OperationTestBase
try:
import sqlparse
except ImportError:
sqlparse = None
class AgnosticRouter(object):
"""
A router that doesn't have an opinion regarding migrating.
"""
def allow_migrate(self, db, model, **hints):
return None
class MigrateNothingRouter(object):
"""
A router that doesn't allow migrating.
"""
def allow_migrate(self, db, model, **hints):
return False<|fim▁hole|> """
A router that always allows migrating.
"""
def allow_migrate(self, db, model, **hints):
return True
class MigrateWhenFooRouter(object):
"""
A router that allows migrating depending on a hint.
"""
def allow_migrate(self, db, model, **hints):
return hints.get('foo', False)
class MultiDBOperationTests(OperationTestBase):
multi_db = True
def _test_create_model(self, app_label, should_run):
"""
Tests that CreateModel honours multi-db settings.
"""
operation = migrations.CreateModel(
"Pony",
[("id", models.AutoField(primary_key=True))],
)
# Test the state alteration
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
# Test the database alteration
self.assertTableNotExists("%s_pony" % app_label)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
if should_run:
self.assertTableExists("%s_pony" % app_label)
else:
self.assertTableNotExists("%s_pony" % app_label)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertTableNotExists("%s_pony" % app_label)
@override_settings(DATABASE_ROUTERS=[AgnosticRouter()])
def test_create_model(self):
"""
Test when router doesn't have an opinion (i.e. CreateModel should run).
"""
self._test_create_model("test_mltdb_crmo", should_run=True)
@override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
def test_create_model2(self):
"""
Test when router returns False (i.e. CreateModel shouldn't run).
"""
self._test_create_model("test_mltdb_crmo2", should_run=False)
@override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()])
def test_create_model3(self):
"""
Test when router returns True (i.e. CreateModel should run).
"""
self._test_create_model("test_mltdb_crmo3", should_run=True)
def test_create_model4(self):
"""
Test multiple routers.
"""
with override_settings(DATABASE_ROUTERS=[AgnosticRouter(), AgnosticRouter()]):
self._test_create_model("test_mltdb_crmo4", should_run=True)
with override_settings(DATABASE_ROUTERS=[MigrateNothingRouter(), MigrateEverythingRouter()]):
self._test_create_model("test_mltdb_crmo4", should_run=False)
with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter(), MigrateNothingRouter()]):
self._test_create_model("test_mltdb_crmo4", should_run=True)
def _test_run_sql(self, app_label, should_run, hints=None):
with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()]):
project_state = self.set_up_test_model(app_label)
sql = """
INSERT INTO {0}_pony (pink, weight) VALUES (1, 3.55);
INSERT INTO {0}_pony (pink, weight) VALUES (3, 5.0);
""".format(app_label)
operation = migrations.RunSQL(sql, hints=hints or {})
# Test the state alteration does nothing
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(new_state, project_state)
# Test the database alteration
self.assertEqual(project_state.apps.get_model(app_label, "Pony").objects.count(), 0)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
Pony = project_state.apps.get_model(app_label, "Pony")
if should_run:
self.assertEqual(Pony.objects.count(), 2)
else:
self.assertEqual(Pony.objects.count(), 0)
@unittest.skipIf(sqlparse is None and connection.features.requires_sqlparse_for_splitting, "Missing sqlparse")
@override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
def test_run_sql(self):
self._test_run_sql("test_mltdb_runsql", should_run=False)
@unittest.skipIf(sqlparse is None and connection.features.requires_sqlparse_for_splitting, "Missing sqlparse")
@override_settings(DATABASE_ROUTERS=[MigrateWhenFooRouter()])
def test_run_sql2(self):
self._test_run_sql("test_mltdb_runsql2", should_run=False)
self._test_run_sql("test_mltdb_runsql2", should_run=True, hints={'foo': True})
def _test_run_python(self, app_label, should_run, hints=None):
with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()]):
project_state = self.set_up_test_model(app_label)
# Create the operation
def inner_method(models, schema_editor):
Pony = models.get_model(app_label, "Pony")
Pony.objects.create(pink=1, weight=3.55)
Pony.objects.create(weight=5)
operation = migrations.RunPython(inner_method, hints=hints or {})
# Test the state alteration does nothing
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(new_state, project_state)
# Test the database alteration
self.assertEqual(project_state.apps.get_model(app_label, "Pony").objects.count(), 0)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
Pony = project_state.apps.get_model(app_label, "Pony")
if should_run:
self.assertEqual(Pony.objects.count(), 2)
else:
self.assertEqual(Pony.objects.count(), 0)
@override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
def test_run_python(self):
self._test_run_python("test_mltdb_runpython", should_run=False)
@override_settings(DATABASE_ROUTERS=[MigrateWhenFooRouter()])
def test_run_python2(self):
self._test_run_python("test_mltdb_runpython2", should_run=False)
self._test_run_python("test_mltdb_runpython2", should_run=True, hints={'foo': True})<|fim▁end|> |
class MigrateEverythingRouter(object): |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># MIT License
#
# Copyright (C) IBM Corporation 2019
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the<|fim▁hole|>#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Machine learning models with differential privacy
"""
from diffprivlib.models.naive_bayes import GaussianNB
from diffprivlib.models.k_means import KMeans
from diffprivlib.models.linear_regression import LinearRegression
from diffprivlib.models.logistic_regression import LogisticRegression
from diffprivlib.models.pca import PCA
from diffprivlib.models.standard_scaler import StandardScaler
from diffprivlib.models.forest import RandomForestClassifier<|fim▁end|> | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions: |
<|file_name|>http_client.py<|end_file_name|><|fim▁begin|>#
# Copyright (c) 2010-2011, 2015
# Nexa Center for Internet & Society, Politecnico di Torino (DAUIN),
# and Simone Basso <[email protected]>.
#
# This file is part of Neubot <http://www.neubot.org/>.
#
# Neubot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Neubot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Neubot. If not, see <http://www.gnu.org/licenses/>.
#
''' HTTP client '''
import logging
<|fim▁hole|>from .http_message import HttpMessage
from . import utils
from . import utils_net
class HttpClient(StreamHandler):
''' Manages one or more HTTP streams '''
def __init__(self, poller):
''' Initialize the HTTP client '''
StreamHandler.__init__(self, poller)
self.host_header = ""
self.rtt = 0
def connect_uri(self, uri, count=1):
''' Connects to the given URI '''
try:
message = HttpMessage()
message.compose(method="GET", uri=uri)
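            # An "https" URI switches the underlying stream to SSL/TLS.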
if message.scheme == "https":
self.conf["net.stream.secure"] = True
endpoint = (message.address, int(message.port))
self.host_header = utils_net.format_epnt(endpoint)
except (KeyboardInterrupt, SystemExit):
raise
except Exception as why:
self.connection_failed(None, why)
else:
self.connect(endpoint, count)
def connection_ready(self, stream):
''' Invoked when the connection is ready '''
def got_response_headers(self, stream, request, response):
''' Invoked when we receive response headers '''
return True
def got_response(self, stream, request, response):
''' Invoked when we receive the response '''
def connection_made(self, sock, endpoint, rtt):
''' Invoked when the connection is created '''
if rtt:
logging.debug("ClientHTTP: latency: %s", utils.time_formatter(rtt))
self.rtt = rtt
# XXX If we didn't connect via connect_uri()...
if not self.host_header:
self.host_header = utils_net.format_epnt(endpoint)
stream = HttpClientStream(self.poller)
stream.attach(self, sock, self.conf)
self.connection_ready(stream)<|fim▁end|> | from .stream_handler import StreamHandler
from .http_client_stream import HttpClientStream |
<|file_name|>generate.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import os
import sys
sys.path.insert(
0,
os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', 'common',
'security-features', 'tools'))
import generate
class ReferrerPolicyConfig(object):
def __init__(self):
self.selection_pattern = \
'%(source_context_list)s.%(delivery_type)s/' + \
'%(delivery_value)s/' + \
'%(subresource)s/' + \
'%(origin)s.%(redirection)s.%(source_scheme)s'
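        # Each %(...)s placeholder is expanded once per generated test
        # permutation by the shared generate.main() helper.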
self.test_file_path_pattern = 'gen/' + self.selection_pattern + '.html'
self.test_description_template = 'Referrer Policy: Expects %(expectation)s for %(subresource)s to %(origin)s origin and %(redirection)s redirection from %(source_scheme)s context.'
self.test_page_title_template = 'Referrer-Policy: %s'
<|fim▁hole|> self.spec_json_js = '/referrer-policy/spec_json.js'
self.test_case_name = 'TestCase'
script_directory = os.path.dirname(os.path.abspath(__file__))
self.spec_directory = os.path.abspath(
os.path.join(script_directory, '..', '..'))
if __name__ == '__main__':
generate.main(ReferrerPolicyConfig())<|fim▁end|> | self.helper_js = '/referrer-policy/generic/test-case.sub.js'
# For debug target only.
self.sanity_checker_js = '/referrer-policy/generic/sanity-checker.js' |
<|file_name|>sensor.py<|end_file_name|><|fim▁begin|>"""
Get WHOIS information for a given host.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.whois/
"""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['python-whois==0.7.1']
_LOGGER = logging.getLogger(__name__)
CONF_DOMAIN = 'domain'
DEFAULT_NAME = 'Whois'
ATTR_EXPIRES = 'expires'
ATTR_NAME_SERVERS = 'name_servers'
ATTR_REGISTRAR = 'registrar'
ATTR_UPDATED = 'updated'
SCAN_INTERVAL = timedelta(hours=24)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_DOMAIN): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the WHOIS sensor."""
import whois
domain = config.get(CONF_DOMAIN)
name = config.get(CONF_NAME)
try:
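        # Do one lookup up front so a misconfigured domain fails at setup
        # time instead of on the first update.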
if 'expiration_date' in whois.whois(domain):
add_entities([WhoisSensor(name, domain)], True)
else:
_LOGGER.error(
"WHOIS lookup for %s didn't contain expiration_date",
domain)
return
except whois.BaseException as ex:
_LOGGER.error(
"Exception %s occurred during WHOIS lookup for %s", ex, domain)
return
class WhoisSensor(Entity):
"""Implementation of a WHOIS sensor."""
def __init__(self, name, domain):
"""Initialize the sensor."""
import whois
self.whois = whois.whois
self._name = name
self._domain = domain
self._state = None
self._attributes = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the icon to represent this sensor."""
return 'mdi:calendar-clock'
@property
def unit_of_measurement(self):
"""Return the unit of measurement to present the value in."""
return 'days'
@property
def state(self):
"""Return the expiration days for hostname."""
return self._state
@property
def device_state_attributes(self):
"""Get the more info attributes."""
return self._attributes
def _empty_state_and_attributes(self):
"""Empty the state and attributes on an error."""
self._state = None
self._attributes = None
def update(self):
"""Get the current WHOIS data for the domain."""
import whois
try:
response = self.whois(self._domain)
except whois.BaseException as ex:
_LOGGER.error("Exception %s occurred during WHOIS lookup", ex)
self._empty_state_and_attributes()
return
if response:
if 'expiration_date' not in response:
_LOGGER.error(
"Failed to find expiration_date in whois lookup response. "
"Did find: %s", ', '.join(response.keys()))
self._empty_state_and_attributes()
return
if not response['expiration_date']:
_LOGGER.error("Whois response contains empty expiration_date")
self._empty_state_and_attributes()
return
attrs = {}
expiration_date = response['expiration_date']
attrs[ATTR_EXPIRES] = expiration_date.isoformat()
<|fim▁hole|> attrs[ATTR_NAME_SERVERS] = ' '.join(response['nameservers'])
if 'updated_date' in response:
update_date = response['updated_date']
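                # python-whois may return a list of timestamps here; keep
                # the first one.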
if isinstance(update_date, list):
attrs[ATTR_UPDATED] = update_date[0].isoformat()
else:
attrs[ATTR_UPDATED] = update_date.isoformat()
if 'registrar' in response:
attrs[ATTR_REGISTRAR] = response['registrar']
time_delta = (expiration_date - expiration_date.now())
self._attributes = attrs
self._state = time_delta.days<|fim▁end|> | if 'nameservers' in response: |
<|file_name|>test_models.py<|end_file_name|><|fim▁begin|>import datetime
from decimal import Decimal
from django.utils import translation
import mock
from nose.tools import eq_, ok_
import amo
import amo.tests
from addons.models import Addon, AddonUser
from constants.payments import PROVIDER_BANGO, PROVIDER_BOKU
from market.models import AddonPremium, Price, PriceCurrency, Refund
from mkt.constants import apps
from mkt.constants.regions import (ALL_REGION_IDS, BR, HU,
SPAIN, UK, US, RESTOFWORLD)
from stats.models import Contribution
from users.models import UserProfile
class TestPremium(amo.tests.TestCase):
fixtures = ['market/prices.json', 'base/addon_3615.json']
def setUp(self):
self.tier_one = Price.objects.get(pk=1)
self.addon = Addon.objects.get(pk=3615)
def test_is_complete(self):
ap = AddonPremium(addon=self.addon)
assert not ap.is_complete()
ap.price = self.tier_one
assert not ap.is_complete()
ap.addon.paypal_id = 'asd'
assert ap.is_complete()
class TestPrice(amo.tests.TestCase):
fixtures = ['market/prices.json']
def setUp(self):
self.tier_one = Price.objects.get(pk=1)
if hasattr(Price, '_currencies'):
del Price._currencies # needed to pick up fixtures.
def test_active(self):
eq_(Price.objects.count(), 2)
eq_(Price.objects.active().count(), 1)
def test_active_order(self):
Price.objects.create(name='USD', price='0.00')
Price.objects.create(name='USD', price='1.99')
eq_(list(Price.objects.active().values_list('price', flat=True)),
[Decimal('0.00'), Decimal('0.99'), Decimal('1.99')])
def test_method_default_all(self):
price = Price.objects.create(name='USD', price='0.00')
eq_(price.method, 2)
def test_method_specified(self):
price = Price.objects.create(name='USD', price='0.99', method=0)
eq_(price.method, 0)
def test_currency(self):
eq_(self.tier_one.pricecurrency_set.count(), 3)
def test_get(self):
eq_(Price.objects.get(pk=1).get_price(), Decimal('0.99'))
def test_get_tier(self):<|fim▁hole|>
def test_get_tier_and_locale(self):
translation.activate('pt_BR')
eq_(Price.objects.get(pk=2).get_price(), Decimal('1.99'))
eq_(Price.objects.get(pk=2).get_price_locale(), u'US$1,99')
def test_no_region(self):
eq_(Price.objects.get(pk=2).get_price_locale(region=HU.id), None)
def test_fallback(self):
translation.activate('foo')
eq_(Price.objects.get(pk=1).get_price(), Decimal('0.99'))
eq_(Price.objects.get(pk=1).get_price_locale(), u'$0.99')
def test_transformer(self):
price = Price.objects.get(pk=1)
price.get_price_locale()
# Warm up Price._currencies.
with self.assertNumQueries(0):
eq_(price.get_price_locale(), u'$0.99')
def test_get_tier_price(self):
eq_(Price.objects.get(pk=2).get_price_locale(region=BR.id), 'R$1.01')
def test_get_tier_price_provider(self):
# Because we specify Boku, there is no tier to be found.
eq_(Price.objects.get(pk=2)
.get_price_locale(region=BR.id, provider=PROVIDER_BOKU), None)
# Turning on Boku will give us the tier.
PriceCurrency.objects.get(pk=3).update(provider=PROVIDER_BOKU)
eq_(Price.objects.get(pk=2)
.get_price_locale(region=BR.id, provider=PROVIDER_BOKU), 'R$1.01')
def test_get_free_tier_price(self):
price = self.make_price('0.00')
eq_(price.get_price_locale(region=US.id), '$0.00')
def test_euro_placement(self):
with self.activate('en-us'):
eq_(Price.objects.get(pk=2).get_price_locale(region=SPAIN.id),
u'\u20ac0.50')
with self.activate('es'):
eq_(Price.objects.get(pk=2).get_price_locale(region=SPAIN.id),
u'0,50\xa0\u20ac')
def test_prices(self):
currencies = Price.objects.get(pk=1).prices()
eq_(len(currencies), 2)
eq_(currencies[0]['currency'], 'PLN')
def test_wrong_currency(self):
bad = 4999
ok_(bad not in ALL_REGION_IDS)
ok_(not Price.objects.get(pk=1).get_price('foo', region=bad))
def test_prices_provider(self):
currencies = Price.objects.get(pk=1).prices(provider=PROVIDER_BANGO)
eq_(len(currencies), 2)
def test_multiple_providers(self):
PriceCurrency.objects.get(pk=2).update(provider=PROVIDER_BOKU)
# This used to be 0, so changing it to 3 puts in scope of the filter.
with self.settings(PAYMENT_PROVIDERS=['bango', 'boku']):
currencies = Price.objects.get(pk=1).prices()
eq_(len(currencies), 3)
def test_region_ids_by_name_multi_provider(self):
with self.settings(PAYMENT_PROVIDERS=['bango', 'boku']):
eq_(Price.objects.get(pk=2).region_ids_by_name(),
[BR.id, SPAIN.id, UK.id, RESTOFWORLD.id])
def test_region_ids_by_name(self):
eq_(Price.objects.get(pk=2).region_ids_by_name(),
[BR.id, SPAIN.id, RESTOFWORLD.id])
def test_region_ids_by_name_w_provider_boku(self):
eq_(Price.objects.get(pk=2).region_ids_by_name(
provider=PROVIDER_BOKU), [UK.id])
def test_region_ids_by_name_w_provider_bango(self):
eq_(Price.objects.get(pk=2).region_ids_by_name(
provider=PROVIDER_BANGO), [BR.id, SPAIN.id, RESTOFWORLD.id])
def test_provider_regions(self):
with self.settings(PAYMENT_PROVIDERS=['bango', 'boku']):
eq_(Price.objects.get(pk=2).provider_regions(), {
PROVIDER_BANGO: [BR, SPAIN, RESTOFWORLD],
PROVIDER_BOKU: [UK]})
def test_provider_regions_boku(self):
with self.settings(PAYMENT_PROVIDERS=['boku']):
eq_(Price.objects.get(pk=2).provider_regions(), {
PROVIDER_BOKU: [UK]})
def test_provider_regions_bango(self):
with self.settings(PAYMENT_PROVIDERS=['bango']):
eq_(Price.objects.get(pk=2).provider_regions(), {
PROVIDER_BANGO: [BR, SPAIN, RESTOFWORLD]})
class TestPriceCurrencyChanges(amo.tests.TestCase):
def setUp(self):
self.addon = amo.tests.addon_factory()
self.make_premium(self.addon)
self.currency = self.addon.premium.price.pricecurrency_set.all()[0]
@mock.patch('mkt.webapps.tasks.index_webapps')
def test_save(self, index_webapps):
self.currency.save()
eq_(index_webapps.delay.call_args[0][0], [self.addon.pk])
@mock.patch('mkt.webapps.tasks.index_webapps')
def test_delete(self, index_webapps):
self.currency.delete()
eq_(index_webapps.delay.call_args[0][0], [self.addon.pk])
class ContributionMixin(object):
def setUp(self):
self.addon = Addon.objects.get(pk=3615)
self.user = UserProfile.objects.get(pk=999)
def create(self, type):
return Contribution.objects.create(type=type, addon=self.addon,
user=self.user)
def purchased(self):
return (self.addon.addonpurchase_set
.filter(user=self.user, type=amo.CONTRIB_PURCHASE)
.exists())
def type(self):
return self.addon.addonpurchase_set.get(user=self.user).type
class TestContribution(ContributionMixin, amo.tests.TestCase):
fixtures = ['base/addon_3615', 'base/users']
def test_purchase(self):
self.create(amo.CONTRIB_PURCHASE)
assert self.purchased()
def test_refund(self):
self.create(amo.CONTRIB_REFUND)
assert not self.purchased()
def test_purchase_and_refund(self):
self.create(amo.CONTRIB_PURCHASE)
self.create(amo.CONTRIB_REFUND)
assert not self.purchased()
eq_(self.type(), amo.CONTRIB_REFUND)
def test_refund_and_purchase(self):
# This refund does nothing, there was nothing there to refund.
self.create(amo.CONTRIB_REFUND)
self.create(amo.CONTRIB_PURCHASE)
assert self.purchased()
eq_(self.type(), amo.CONTRIB_PURCHASE)
def test_really_cant_decide(self):
self.create(amo.CONTRIB_PURCHASE)
self.create(amo.CONTRIB_REFUND)
self.create(amo.CONTRIB_PURCHASE)
self.create(amo.CONTRIB_REFUND)
self.create(amo.CONTRIB_PURCHASE)
assert self.purchased()
eq_(self.type(), amo.CONTRIB_PURCHASE)
def test_purchase_and_chargeback(self):
self.create(amo.CONTRIB_PURCHASE)
self.create(amo.CONTRIB_CHARGEBACK)
assert not self.purchased()
eq_(self.type(), amo.CONTRIB_CHARGEBACK)
def test_other_user(self):
other = UserProfile.objects.get(email='[email protected]')
Contribution.objects.create(type=amo.CONTRIB_PURCHASE,
addon=self.addon, user=other)
self.create(amo.CONTRIB_PURCHASE)
self.create(amo.CONTRIB_REFUND)
eq_(self.addon.addonpurchase_set.filter(user=other).count(), 1)
def set_role(self, role):
AddonUser.objects.create(addon=self.addon, user=self.user, role=role)
self.create(amo.CONTRIB_PURCHASE)
installed = self.user.installed_set.filter(addon=self.addon)
eq_(installed.count(), 1)
eq_(installed[0].install_type, apps.INSTALL_TYPE_DEVELOPER)
def test_user_dev(self):
self.set_role(amo.AUTHOR_ROLE_DEV)
def test_user_owner(self):
self.set_role(amo.AUTHOR_ROLE_OWNER)
def test_user_installed_dev(self):
self.create(amo.CONTRIB_PURCHASE)
eq_(self.user.installed_set.filter(addon=self.addon).count(), 1)
def test_user_not_purchased(self):
self.addon.update(premium_type=amo.ADDON_PREMIUM)
eq_(list(self.user.purchase_ids()), [])
def test_user_purchased(self):
self.addon.update(premium_type=amo.ADDON_PREMIUM)
self.addon.addonpurchase_set.create(user=self.user)
eq_(list(self.user.purchase_ids()), [3615L])
def test_user_refunded(self):
self.addon.update(premium_type=amo.ADDON_PREMIUM)
self.addon.addonpurchase_set.create(user=self.user,
type=amo.CONTRIB_REFUND)
eq_(list(self.user.purchase_ids()), [])
def test_user_cache(self):
# Tests that the purchase_ids caches.
self.addon.update(premium_type=amo.ADDON_PREMIUM)
eq_(list(self.user.purchase_ids()), [])
self.create(amo.CONTRIB_PURCHASE)
eq_(list(self.user.purchase_ids()), [3615L])
# This caches.
eq_(list(self.user.purchase_ids()), [3615L])
self.create(amo.CONTRIB_REFUND)
eq_(list(self.user.purchase_ids()), [])
class TestRefundContribution(ContributionMixin, amo.tests.TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestRefundContribution, self).setUp()
self.contribution = self.create(amo.CONTRIB_PURCHASE)
def do_refund(self, expected, status, refund_reason=None,
rejection_reason=None):
"""Checks that a refund is enqueued and contains the correct values."""
self.contribution.enqueue_refund(status, self.user,
refund_reason=refund_reason,
rejection_reason=rejection_reason)
expected.update(contribution=self.contribution, status=status)
eq_(Refund.objects.count(), 1)
refund = Refund.objects.filter(**expected)
eq_(refund.exists(), True)
return refund[0]
def test_pending(self):
reason = 'this is bloody bullocks, mate'
expected = dict(refund_reason=reason,
requested__isnull=False,
approved=None,
declined=None)
refund = self.do_refund(expected, amo.REFUND_PENDING, reason)
self.assertCloseToNow(refund.requested)
def test_pending_to_approved(self):
reason = 'this is bloody bullocks, mate'
expected = dict(refund_reason=reason,
requested__isnull=False,
approved=None,
declined=None)
refund = self.do_refund(expected, amo.REFUND_PENDING, reason)
self.assertCloseToNow(refund.requested)
# Change `requested` date to some date in the past.
requested_date = refund.requested - datetime.timedelta(hours=1)
refund.requested = requested_date
refund.save()
expected = dict(refund_reason=reason,
requested__isnull=False,
approved__isnull=False,
declined=None)
refund = self.do_refund(expected, amo.REFUND_APPROVED)
eq_(refund.requested, requested_date,
'Expected date `requested` to remain unchanged.')
self.assertCloseToNow(refund.approved)
def test_approved_instant(self):
expected = dict(refund_reason='',
requested__isnull=False,
approved__isnull=False,
declined=None)
refund = self.do_refund(expected, amo.REFUND_APPROVED_INSTANT)
self.assertCloseToNow(refund.requested)
self.assertCloseToNow(refund.approved)
def test_pending_to_declined(self):
refund_reason = 'please, bro'
rejection_reason = 'sorry, brah'
expected = dict(refund_reason=refund_reason,
rejection_reason='',
requested__isnull=False,
approved=None,
declined=None)
refund = self.do_refund(expected, amo.REFUND_PENDING, refund_reason)
self.assertCloseToNow(refund.requested)
requested_date = refund.requested - datetime.timedelta(hours=1)
refund.requested = requested_date
refund.save()
expected = dict(refund_reason=refund_reason,
rejection_reason=rejection_reason,
requested__isnull=False,
approved=None,
declined__isnull=False)
refund = self.do_refund(expected, amo.REFUND_DECLINED,
rejection_reason=rejection_reason)
eq_(refund.requested, requested_date,
'Expected date `requested` to remain unchanged.')
self.assertCloseToNow(refund.declined)
class TestRefundManager(amo.tests.TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
self.addon = Addon.objects.get(id=3615)
self.user = UserProfile.objects.get(email='[email protected]')
self.expected = {}
for status in amo.REFUND_STATUSES.keys():
c = Contribution.objects.create(addon=self.addon, user=self.user,
type=amo.CONTRIB_PURCHASE)
self.expected[status] = Refund.objects.create(contribution=c,
status=status,
user=self.user)
def test_all(self):
eq_(sorted(Refund.objects.values_list('id', flat=True)),
sorted(e.id for e in self.expected.values()))
def test_pending(self):
eq_(list(Refund.objects.pending(self.addon)),
[self.expected[amo.REFUND_PENDING]])
def test_approved(self):
eq_(list(Refund.objects.approved(self.addon)),
[self.expected[amo.REFUND_APPROVED]])
def test_instant(self):
eq_(list(Refund.objects.instant(self.addon)),
[self.expected[amo.REFUND_APPROVED_INSTANT]])
def test_declined(self):
eq_(list(Refund.objects.declined(self.addon)),
[self.expected[amo.REFUND_DECLINED]])
def test_by_addon(self):
other = Addon.objects.create(type=amo.ADDON_WEBAPP)
c = Contribution.objects.create(addon=other, user=self.user,
type=amo.CONTRIB_PURCHASE)
ref = Refund.objects.create(contribution=c, status=amo.REFUND_DECLINED,
user=self.user)
declined = Refund.objects.filter(status=amo.REFUND_DECLINED)
eq_(sorted(r.id for r in declined),
sorted(r.id for r in [self.expected[amo.REFUND_DECLINED], ref]))
eq_(sorted(r.id for r in Refund.objects.by_addon(addon=self.addon)),
sorted(r.id for r in self.expected.values()))
eq_(list(Refund.objects.by_addon(addon=other)), [ref])<|fim▁end|> | translation.activate('en_CA')
eq_(Price.objects.get(pk=1).get_price(), Decimal('0.99'))
eq_(Price.objects.get(pk=1).get_price_locale(), u'US$0.99') |
<|file_name|>test_fs.py<|end_file_name|><|fim▁begin|>__author__ = "Nitin Kumar, Rick Sherman"
__credits__ = "Jeremy Schulman"
import unittest
from nose.plugins.attrib import attr
import os
from ncclient.manager import Manager, make_device_handler
from ncclient.transport import SSHSession
from jnpr.junos import Device
from jnpr.junos.utils.fs import FS
from mock import patch, MagicMock, call
@attr('unit')
class TestFS(unittest.TestCase):
@patch('ncclient.manager.connect')
def setUp(self, mock_connect):
mock_connect.side_effect = self._mock_manager
self.dev = Device(host='1.1.1.1', user='rick', password='password123',
gather_facts=False)
self.dev.open()
self.fs = FS(self.dev)
def test_cat_wrong_path_return_none(self):
path = 'test/report'
self.assertEqual(self.fs.cat(path), None)
def test_cat(self):
self.fs._dev.rpc.file_show = MagicMock(side_effect=self._mock_manager)
path = 'test/cat.txt'
self.assertTrue('testing cat functionality' in self.fs.cat(path))
self.fs._dev.rpc.file_show.assert_called_with(filename='test/cat.txt')
def test_cwd(self):
self.fs._dev.rpc.set_cli_working_directory = MagicMock()
folder = 'test/report'
self.fs.cwd(folder)
self.fs._dev.rpc.set_cli_working_directory.\
assert_called_with(directory='test/report')
@patch('jnpr.junos.Device.execute')
def test_pwd(self, mock_execute):
mock_execute.side_effect = MagicMock(side_effect=self._mock_manager)
self.fs.pwd()
self.assertEqual(self.fs.pwd(), '/cf/var/home/rick')
def test_checksum_return_none(self):
path = 'test/report'
self.assertEqual(self.fs.checksum(path), None)
def test_checksum_unknown_calc(self):
path = 'test/report'
self.assertRaises(ValueError, self.fs.checksum, path=path, calc='abc')
def test_checksum_return_rsp(self):
self.fs.dev.rpc.get_sha256_checksum_information = \
MagicMock(side_effect=self._mock_manager)
path = 'test/checksum'
self.assertEqual(self.fs.checksum(path, 'sha256'), 'xxxx')
self.fs.dev.rpc.get_sha256_checksum_information.\
assert_called_with(path='test/checksum')
def test_stat_calling___decode_file(self):
path = 'test/stat/decode_file'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.assertEqual(self.fs.stat(path),
{'owner': 'pqr', 'path': '/var/abc.sh',
'permissions': 755,
'permissions_text': '-rwxr-xr-x', 'size': 2,
'ts_date': 'Mar 13 06:54',
'ts_epoc': '1394693680',
'type': 'file'})
def test_stat_calling___decode_dir(self):
path = 'test/stat/decode_dir'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.assertEqual(self.fs.stat(path),
{'path': '/var', 'type': 'dir', 'file_count': 1,
'size': 2})
def test_stat_return_none(self):
path = 'test/abc'
self.fs.dev.rpc.file_list = MagicMock()
self.fs.dev.rpc.file_list.find.return_value = 'output'<|fim▁hole|> path = 'test/stat/decode_file'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.assertEqual(self.fs.ls(path),
{'owner': 'pqr', 'path': '/var/abc.sh',
'permissions': 755,
'permissions_text': '-rwxr-xr-x', 'size': 2,
'ts_date': 'Mar 13 06:54',
'ts_epoc': '1394693680',
'type': 'file'})
def test_ls_calling___decode_dir(self):
path = 'test/stat/decode_dir'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.assertEqual(self.fs.ls(path),
{'files':
{'abc': {'permissions_text': 'drwxr-xr-x',
'ts_date': 'Feb 17 15:30',
'ts_epoc': '1392651039',
'owner': 'root', 'path': 'abc',
'size': 2, 'type': 'dir',
'permissions': 555}},
'path': '/var', 'type': 'dir',
'file_count': 1,
'size': 2})
def test_ls_return_none(self):
path = 'test/abc'
self.fs.dev.rpc.file_list = MagicMock()
self.fs.dev.rpc.file_list.find.return_value = 'output'
self.assertEqual(self.fs.ls(path), None)
@patch('jnpr.junos.utils.fs.FS._decode_file')
def test_ls_link_path_false(self, mock_decode_file):
mock_decode_file.get.return_value = False
path = 'test/stat/decode_file'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.fs.ls(path, followlink=False)
mock_decode_file.assert_has_calls(call().get('link'))
def test_ls_brief_true(self):
path = 'test/stat/decode_dir'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.assertEqual(self.fs.ls(path, brief=True),
{'files': ['abc'], 'path': '/var',
'type': 'dir', 'file_count': 1, 'size': 2})
def test_ls_calling___decode_dir_type_symbolic_link(self):
path = 'test/stat/decode_symbolic_link'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.assertEqual(self.fs.ls(path),
{'files':
{'abc': {'permissions_text': 'drwxr-xr-x',
'ts_date': 'Feb 17 15:30',
'link': 'symlink test',
'ts_epoc': '1392651039',
'owner': 'root', 'path': 'abc',
'size': 2, 'type': 'link',
'permissions': 555}},
'path': '/var', 'type': 'dir', 'file_count': 1,
'size': 2})
def test_rm_return_true(self):
self.fs.dev.rpc.file_delete = MagicMock(return_value=True)
path = 'test/abc'
self.assertTrue(self.fs.rm(path))
self.fs.dev.rpc.file_delete.assert_called_once_with(
path='test/abc')
def test_rm_return_false(self):
path = 'test/abc'
self.fs.dev.rpc.file_delete = MagicMock(return_value=False)
self.assertFalse(self.fs.rm(path))
self.fs.dev.rpc.file_delete.assert_called_once_with(
path='test/abc')
def test_copy_return_true(self):
self.fs.dev.rpc.file_copy = MagicMock()
initial = 'test/abc'
final = 'test/xyz'
self.assertTrue(self.fs.cp(initial, final))
self.fs.dev.rpc.file_copy.assert_called_once_with(
source='test/abc',
destination='test/xyz')
def test_copy_return_false(self):
initial = 'test/abc'
final = 'test/xyz'
self.fs.dev.rpc.file_copy = MagicMock(side_effect=Exception)
self.assertFalse(self.fs.cp(initial, final))
self.fs.dev.rpc.file_copy.assert_called_once_with(
source='test/abc',
destination='test/xyz')
def test_move_return_true(self):
self.fs.dev.rpc.file_rename = MagicMock(return_value=True)
initial = 'test/abc'
final = 'test/xyz'
self.assertTrue(self.fs.mv(initial, final))
self.fs.dev.rpc.file_rename.assert_called_once_with(
source='test/abc',
destination='test/xyz')
def test_move_return_false(self):
initial = 'test/abc'
final = 'test/xyz'
self.fs.dev.rpc.file_rename = MagicMock(return_value=False)
self.assertFalse(self.fs.mv(initial, final))
self.fs.dev.rpc.file_rename.assert_called_once_with(
source='test/abc',
destination='test/xyz')
def test_tgz_return_true(self):
src = 'test/tgz.txt'
dst = 'test/xyz'
self.fs.dev.rpc.file_archive = MagicMock(return_value=True)
self.assertTrue(self.fs.tgz(src, dst))
self.fs.dev.rpc.file_archive.assert_called_once_with(
source='test/tgz.txt',
destination='test/xyz', compress=True)
@patch('jnpr.junos.Device.execute')
def test_tgz_return_error(self, mock_execute):
mock_execute.side_effect = self._mock_manager
src = 'test/tgz.txt'
dst = 'test/xyz'
self.assertTrue('testing tgz' in self.fs.tgz(src, dst))
@patch('jnpr.junos.utils.fs.StartShell')
def test_rmdir(self, mock_StartShell):
path = 'test/rmdir'
        print(self.fs.rmdir(path))
calls = [
call().__enter__(),
call().__enter__().run('rmdir test/rmdir'),
call().__exit__(None, None, None)]
mock_StartShell.assert_has_calls(calls)
@patch('jnpr.junos.utils.fs.StartShell')
def test_mkdir(self, mock_StartShell):
path = 'test/mkdir'
print self.fs.mkdir(path)
calls = [
call().__enter__(),
call().__enter__().run('mkdir -p test/mkdir'),
call().__exit__(None, None, None)]
mock_StartShell.assert_has_calls(calls)
@patch('jnpr.junos.utils.fs.StartShell')
def test_symlink(self, mock_StartShell):
src = 'test/tgz.txt'
dst = 'test/xyz'
print self.fs.symlink(src, dst)
calls = [
call().__enter__(),
call().__enter__().run('ln -sf test/tgz.txt test/xyz'),
call().__exit__(None, None, None)]
mock_StartShell.assert_has_calls(calls)
@patch('jnpr.junos.Device.execute')
def test_storage_usage(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(self.fs.storage_usage(),
{'/dev/abc':
{'avail_block': 234234,
'used_blocks': 2346455, 'used_pct': '1',
'mount': '/', 'total_blocks': 567431,
'avail': '2F', 'used': '481M',
'total': '4F'}})
@patch('jnpr.junos.Device.execute')
def test_storage_cleanup(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(self.fs.storage_cleanup(),
{'/var/abc.txt':
{'ts_date': 'Apr 25 10:38', 'size': 11}})
@patch('jnpr.junos.Device.execute')
def test_storage_cleanup_check(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(self.fs.storage_cleanup_check(),
{'/var/abc.txt':
{'ts_date': 'Apr 25 10:38', 'size': 11}})
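    # Helper below: loads a canned RPC reply from the rpc-reply fixture
    # directory. Some fixtures are returned whole, others are unwrapped to
    # the underlying lxml document (or its first child) to mirror what the
    # live RPC layer hands back.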
def _read_file(self, fname):
from ncclient.xml_ import NCElement
fpath = os.path.join(os.path.dirname(__file__),
'rpc-reply', fname)
foo = open(fpath).read()
if (fname == 'get-rpc-error.xml' or
fname == 'get-index-error.xml' or
fname == 'get-system-core-dumps.xml'):
rpc_reply = NCElement(foo, self.dev._conn._device_handler
.transform_reply())
elif (fname == 'show-configuration.xml' or
fname == 'show-system-alarms.xml'):
rpc_reply = NCElement(foo, self.dev._conn._device_handler
.transform_reply())._NCElement__doc
else:
rpc_reply = NCElement(foo, self.dev._conn._device_handler
.transform_reply())._NCElement__doc[0]
return rpc_reply
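    # Dispatcher for the MagicMock side_effects above: it inspects the mocked
    # call's kwargs/args and returns the matching canned reply, so every test
    # in this class runs against a fixed, known XML response.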
def _mock_manager(self, *args, **kwargs):
if kwargs:
# if 'path' in kwargs and 'detail' in kwargs:
# return self._read_file('dir_list_detail.xml')
if 'path' in kwargs:
if kwargs['path'] == 'test/stat/decode_dir':
return self._read_file('file-list_dir.xml')
elif kwargs['path'] == 'test/stat/decode_file':
return self._read_file('file-list_file.xml')
elif kwargs['path'] == 'test/checksum':
return self._read_file('checksum.xml')
elif kwargs['path'] == 'test/stat/decode_symbolic_link':
return self._read_file('file-list_symlink.xml')
if 'filename' in kwargs:
if kwargs['filename'] == 'test/cat.txt':
return self._read_file('file-show.xml')
device_params = kwargs['device_params']
device_handler = make_device_handler(device_params)
session = SSHSession(device_handler)
return Manager(session, device_handler)
elif args:
if args[0].tag == 'command':
if args[0].text == 'show cli directory':
return self._read_file('show-cli-directory.xml')
elif args[0].tag == 'get-system-storage':
return self._read_file('get-system-storage.xml')
elif args[0].tag == 'request-system-storage-cleanup':
return self._read_file('request-system-storage-cleanup.xml')
elif args[0].tag == 'file-archive':
return self._read_file('file-archive.xml')<|fim▁end|> | self.assertEqual(self.fs.stat(path), None)
def test_ls_calling___decode_file(self): |
<|file_name|>stream.spec.js<|end_file_name|><|fim▁begin|>"use strict";
const readdir = require("../../");
const dir = require("../utils/dir");
const { expect } = require("chai");
const through2 = require("through2");
const fs = require("fs");
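// Parsed major.minor of the running Node version; used below to branch on
// "readable"-event semantics that changed in Node 10 and again in Node 12.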
let nodeVersion = parseFloat(process.version.substr(1));
describe("Stream API", () => {
it("should be able to pipe to other streams as a Buffer", done => {
let allData = [];
readdir.stream("test/dir")
.pipe(through2((data, enc, next) => {
try {
// By default, the data is streamed as a Buffer
expect(data).to.be.an.instanceOf(Buffer);
// Buffer.toString() returns the file name
allData.push(data.toString());
next(null, data);
}
catch (e) {
next(e);
}
}))
.on("finish", () => {
try {
expect(allData).to.have.same.members(dir.shallow.data);
done();
}
catch (e) {
done(e);
}
})
.on("error", err => {
done(err);
});
});
it('should be able to pipe to other streams in "object mode"', done => {
let allData = [];
readdir.stream("test/dir")
.pipe(through2({ objectMode: true }, (data, enc, next) => {
try {
// In "object mode", the data is a string
expect(data).to.be.a("string");
allData.push(data);
next(null, data);
}
catch (e) {
next(e);
}
}))
.on("finish", () => {
try {
expect(allData).to.have.same.members(dir.shallow.data);
done();
}
catch (e) {
done(e);
}
})
.on("error", err => {
done(err);
});
});
it('should be able to pipe fs.Stats to other streams in "object mode"', done => {
let allData = [];
readdir.stream("test/dir", { stats: true })
.pipe(through2({ objectMode: true }, (data, enc, next) => {
try {
// The data is an fs.Stats object
expect(data).to.be.an("object");
expect(data).to.be.an.instanceOf(fs.Stats);
allData.push(data.path);
next(null, data);
}
catch (e) {
next(e);
}
}))
.on("finish", () => {
try {
expect(allData).to.have.same.members(dir.shallow.data);
done();
}
catch (e) {
done(e);
}
})
.on("error", done);
});
it("should be able to pause & resume the stream", done => {
let allData = [];
let stream = readdir.stream("test/dir")
.on("data", data => {
allData.push(data);
// The stream should not be paused
expect(stream.isPaused()).to.equal(false);
if (allData.length === 3) {
// Pause for one second
stream.pause();
setTimeout(() => {
try {
// The stream should still be paused
expect(stream.isPaused()).to.equal(true);
// The array should still only contain 3 items
expect(allData).to.have.lengthOf(3);
// Read the rest of the stream
stream.resume();
}
catch (e) {
done(e);
}
}, 1000);
}
})
.on("end", () => {
expect(allData).to.have.same.members(dir.shallow.data);
done();
})
.on("error", done);
});
it('should be able to use "readable" and "read"', done => {
let allData = [];
let nullCount = 0;
let stream = readdir.stream("test/dir")
.on("readable", () => {
// Manually read the next chunk of data
let data = stream.read();
while (true) { // eslint-disable-line
if (data === null) {
// The stream is done
nullCount++;
break;
}
else {
// The data should be a string (the file name)
expect(data).to.be.a("string").with.length.of.at.least(1);
allData.push(data);
data = stream.read();
}
}
})
.on("end", () => {
if (nodeVersion >= 12) {
// In Node >= 12, the "readable" event fires twice,
// and stream.read() returns null twice
expect(nullCount).to.equal(2);
}
else if (nodeVersion >= 10) {
// In Node >= 10, the "readable" event only fires once,
// and stream.read() only returns null once
expect(nullCount).to.equal(1);
}
else {
// In Node < 10, the "readable" event fires 13 times (once per file),
// and stream.read() returns null each time
expect(nullCount).to.equal(13);
}
expect(allData).to.have.same.members(dir.shallow.data);
done();
})
.on("error", done);
});
it('should be able to subscribe to custom events instead of "data"', done => {
let allFiles = [];
let allSubdirs = [];
let stream = readdir.stream("test/dir");
// Calling "resume" is required, since we're not handling the "data" event
stream.resume();
stream
.on("file", filename => {
expect(filename).to.be.a("string").with.length.of.at.least(1);
allFiles.push(filename);
})
.on("directory", subdir => {
expect(subdir).to.be.a("string").with.length.of.at.least(1);
allSubdirs.push(subdir);
})
.on("end", () => {
expect(allFiles).to.have.same.members(dir.shallow.files);
expect(allSubdirs).to.have.same.members(dir.shallow.dirs);
done();
})
.on("error", done);
});
it('should handle errors that occur in the "data" event listener', done => {
testErrorHandling("data", dir.shallow.data, 7, done);
});
it('should handle errors that occur in the "file" event listener', done => {
testErrorHandling("file", dir.shallow.files, 3, done);
});
it('should handle errors that occur in the "directory" event listener', done => {
testErrorHandling("directory", dir.shallow.dirs, 2, done);
});
it('should handle errors that occur in the "symlink" event listener', done => {
testErrorHandling("symlink", dir.shallow.symlinks, 5, done);
});
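  // Shared helper: throws from the given event listener for ".txt" and
  // "dir-" paths, then asserts the stream kept emitting every entry while
  // surfacing exactly `expectedErrors` "error" events.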
function testErrorHandling (eventName, expected, expectedErrors, done) {
let errors = [], data = [];
let stream = readdir.stream("test/dir");<|fim▁hole|> // Capture all errors
stream.on("error", error => {
errors.push(error);
});
stream.on(eventName, path => {
data.push(path);
if (path.indexOf(".txt") >= 0 || path.indexOf("dir-") >= 0) {
throw new Error("Epic Fail!!!");
}
else {
return true;
}
});
stream.on("end", () => {
try {
// Make sure the correct number of errors were thrown
expect(errors).to.have.lengthOf(expectedErrors);
for (let error of errors) {
expect(error.message).to.equal("Epic Fail!!!");
}
// All of the events should have still been emitted, despite the errors
expect(data).to.have.same.members(expected);
done();
}
catch (e) {
done(e);
}
});
stream.resume();
}
});<|fim▁end|> | |
<|file_name|>SharedCriterion.java<|end_file_name|><|fim▁begin|>package com.google.api.ads.adwords.jaxws.v201509.cm;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlType;
/**
*
* Represents a criterion belonging to a shared set.
*
*
* <p>Java class for SharedCriterion complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="SharedCriterion">
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element name="sharedSetId" type="{http://www.w3.org/2001/XMLSchema}long" minOccurs="0"/>
* <element name="criterion" type="{https://adwords.google.com/api/adwords/cm/v201509}Criterion" minOccurs="0"/>
* <element name="negative" type="{http://www.w3.org/2001/XMLSchema}boolean" minOccurs="0"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "SharedCriterion", propOrder = {
"sharedSetId",
"criterion",
"negative"
})
public class SharedCriterion {
protected Long sharedSetId;
protected Criterion criterion;
protected Boolean negative;
/**
* Gets the value of the sharedSetId property.
*
* @return
* possible object is
* {@link Long }
*
*/
public Long getSharedSetId() {
return sharedSetId;
}
/**
* Sets the value of the sharedSetId property.
*
* @param value
* allowed object is
* {@link Long }
*
*/
public void setSharedSetId(Long value) {
this.sharedSetId = value;
}
/**
* Gets the value of the criterion property.<|fim▁hole|> * {@link Criterion }
*
*/
public Criterion getCriterion() {
return criterion;
}
/**
* Sets the value of the criterion property.
*
* @param value
* allowed object is
* {@link Criterion }
*
*/
public void setCriterion(Criterion value) {
this.criterion = value;
}
/**
* Gets the value of the negative property.
*
* @return
* possible object is
* {@link Boolean }
*
*/
public Boolean isNegative() {
return negative;
}
/**
* Sets the value of the negative property.
*
* @param value
* allowed object is
* {@link Boolean }
*
*/
public void setNegative(Boolean value) {
this.negative = value;
}
}<|fim▁end|> | *
* @return
* possible object is |
<|file_name|>unwind-tup2.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your<|fim▁hole|>// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[feature(managed_boxes)];
// error-pattern:fail
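// Constructing the tuple in main() unwinds partway through: fold_local()
// succeeds, then fold_remote() fails, exercising cleanup of a partially
// built value.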
fn fold_local() -> @~[int]{
@~[0,0,0,0,0,0]
}
fn fold_remote() -> @~[int]{
fail!();
}
fn main() {
let _lss = (fold_local(), fold_remote());
}<|fim▁end|> | |
<|file_name|>builtin.py<|end_file_name|><|fim▁begin|>from django.template import Library
register = Library()
@register.simple_tag(takes_context=True)
def assign(context, **kwargs):
"""
Usage:
{% assign hello="Hello Django" %}
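        {{ hello }}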
"""
for key, value in kwargs.items():
context[key] = value
return ''
@register.filter
def get(content, key):
"""
Usage:
        {{ object|get:key|get:key }}
"""
    if isinstance(content, dict):
        return content.get(key, '')
    # Every value is an instance of object, so fall back to attribute lookup.
    return getattr(content, key, '')
@register.simple_tag()
def call(fn, *args, **kwargs):
"""<|fim▁hole|> Callable function should be decorated with
redisca.template.decorators.template_func.
"""
if callable(fn):
return fn(*args, **kwargs)
return fn<|fim▁end|> | Usage:
{% call object.method *args **kwargs %} |
<|file_name|>ganeti.rapi.client_unittest.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright (C) 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Script for unittesting the RAPI client module"""
import unittest
import warnings
import pycurl
from ganeti import opcodes
from ganeti import constants
from ganeti import http
from ganeti import serializer
from ganeti import utils
from ganeti import query
from ganeti import objects
from ganeti import rapi
from ganeti import errors
import ganeti.rapi.testutils
from ganeti.rapi import connector
from ganeti.rapi import rlib2
from ganeti.rapi import client
import testutils
# List of resource handlers which aren't used by the RAPI client
_KNOWN_UNUSED = set([
rlib2.R_root,
rlib2.R_2,
])
# Global variable for collecting used handlers
_used_handlers = None
class RapiMock(object):
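  """Stand-in for the RAPI HTTP layer used by these tests.

  Resolves each request path to its rlib2 handler via connector.Mapper and
  serves queued canned responses; AddResponse inserts at the front and
  FetchResponse pops from the back, so replies are consumed in the order
  they were added.

  """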
def __init__(self):
self._mapper = connector.Mapper()
self._responses = []
self._last_handler = None
self._last_req_data = None
def ResetResponses(self):
del self._responses[:]
def AddResponse(self, response, code=200):
self._responses.insert(0, (code, response))
def CountPending(self):
return len(self._responses)
def GetLastHandler(self):
return self._last_handler
def GetLastRequestData(self):
return self._last_req_data
def FetchResponse(self, path, method, headers, request_body):
self._last_req_data = request_body
try:
(handler_cls, items, args) = self._mapper.getController(path)
# Record handler as used
_used_handlers.add(handler_cls)
self._last_handler = handler_cls(items, args, None)
if not hasattr(self._last_handler, method.upper()):
raise http.HttpNotImplemented(message="Method not implemented")
except http.HttpException, ex:
code = ex.code
response = ex.message
else:
if not self._responses:
raise Exception("No responses")
(code, response) = self._responses.pop()
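    # Callers only ever look at the code and body; headers are never
    # inspected, hence the NotImplemented placeholder.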
return (code, NotImplemented, response)
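# Typical wiring, mirroring GanetiRapiClientTests.setUp below:
#   mock = RapiMock()
#   curl = rapi.testutils.FakeCurl(mock)
#   cl = client.GanetiRapiClient("master.example.com",
#                                curl_factory=lambda: curl)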
class TestConstants(unittest.TestCase):
def test(self):
self.assertEqual(client.GANETI_RAPI_PORT, constants.DEFAULT_RAPI_PORT)
self.assertEqual(client.GANETI_RAPI_VERSION, constants.RAPI_VERSION)
self.assertEqual(client.HTTP_APP_JSON, http.HTTP_APP_JSON)
self.assertEqual(client._REQ_DATA_VERSION_FIELD, rlib2._REQ_DATA_VERSION)
self.assertEqual(client.JOB_STATUS_QUEUED, constants.JOB_STATUS_QUEUED)
self.assertEqual(client.JOB_STATUS_WAITING, constants.JOB_STATUS_WAITING)
self.assertEqual(client.JOB_STATUS_CANCELING,
constants.JOB_STATUS_CANCELING)
self.assertEqual(client.JOB_STATUS_RUNNING, constants.JOB_STATUS_RUNNING)
self.assertEqual(client.JOB_STATUS_CANCELED, constants.JOB_STATUS_CANCELED)
self.assertEqual(client.JOB_STATUS_SUCCESS, constants.JOB_STATUS_SUCCESS)
self.assertEqual(client.JOB_STATUS_ERROR, constants.JOB_STATUS_ERROR)
self.assertEqual(client.JOB_STATUS_PENDING, constants.JOBS_PENDING)
self.assertEqual(client.JOB_STATUS_FINALIZED, constants.JOBS_FINALIZED)
self.assertEqual(client.JOB_STATUS_ALL, constants.JOB_STATUS_ALL)
# Node evacuation
self.assertEqual(client.NODE_EVAC_PRI, constants.NODE_EVAC_PRI)
self.assertEqual(client.NODE_EVAC_SEC, constants.NODE_EVAC_SEC)
self.assertEqual(client.NODE_EVAC_ALL, constants.NODE_EVAC_ALL)
# Legacy name
self.assertEqual(client.JOB_STATUS_WAITLOCK, constants.JOB_STATUS_WAITING)
# RAPI feature strings
self.assertEqual(client._INST_CREATE_REQV1, rlib2._INST_CREATE_REQV1)
self.assertEqual(client.INST_CREATE_REQV1, rlib2._INST_CREATE_REQV1)
self.assertEqual(client._INST_REINSTALL_REQV1, rlib2._INST_REINSTALL_REQV1)
self.assertEqual(client.INST_REINSTALL_REQV1, rlib2._INST_REINSTALL_REQV1)
self.assertEqual(client._NODE_MIGRATE_REQV1, rlib2._NODE_MIGRATE_REQV1)
self.assertEqual(client.NODE_MIGRATE_REQV1, rlib2._NODE_MIGRATE_REQV1)
self.assertEqual(client._NODE_EVAC_RES1, rlib2._NODE_EVAC_RES1)
self.assertEqual(client.NODE_EVAC_RES1, rlib2._NODE_EVAC_RES1)
def testErrors(self):
self.assertEqual(client.ECODE_ALL, errors.ECODE_ALL)
# Make sure all error codes are in both RAPI client and errors module
for name in filter(lambda s: (s.startswith("ECODE_") and s != "ECODE_ALL"),
dir(client)):
value = getattr(client, name)
self.assertEqual(value, getattr(errors, name))
self.assertTrue(value in client.ECODE_ALL)
self.assertTrue(value in errors.ECODE_ALL)
class RapiMockTest(unittest.TestCase):
def test404(self):
(code, _, body) = RapiMock().FetchResponse("/foo", "GET", None, None)
self.assertEqual(code, 404)
self.assertTrue(body is None)
def test501(self):
(code, _, body) = RapiMock().FetchResponse("/version", "POST", None, None)
self.assertEqual(code, 501)
self.assertEqual(body, "Method not implemented")
def test200(self):
rapi = RapiMock()
rapi.AddResponse("2")
(code, _, response) = rapi.FetchResponse("/version", "GET", None, None)
self.assertEqual(200, code)
self.assertEqual("2", response)
self.failUnless(isinstance(rapi.GetLastHandler(), rlib2.R_version))
def _FakeNoSslPycurlVersion():
# Note: incomplete version tuple
return (3, "7.16.0", 462848, "mysystem", 1581, None, 0)
def _FakeFancySslPycurlVersion():
# Note: incomplete version tuple
return (3, "7.16.0", 462848, "mysystem", 1581, "FancySSL/1.2.3", 0)
def _FakeOpenSslPycurlVersion():
# Note: incomplete version tuple
return (2, "7.15.5", 462597, "othersystem", 668, "OpenSSL/0.9.8c", 0)
def _FakeGnuTlsPycurlVersion():
# Note: incomplete version tuple
return (3, "7.18.0", 463360, "somesystem", 1581, "GnuTLS/2.0.4", 0)
class TestExtendedConfig(unittest.TestCase):
def testAuth(self):
cl = client.GanetiRapiClient("master.example.com",
username="user", password="pw",
curl_factory=lambda: rapi.testutils.FakeCurl(RapiMock()))
curl = cl._CreateCurl()
self.assertEqual(curl.getopt(pycurl.HTTPAUTH), pycurl.HTTPAUTH_BASIC)
self.assertEqual(curl.getopt(pycurl.USERPWD), "user:pw")
def testInvalidAuth(self):
# No username
self.assertRaises(client.Error, client.GanetiRapiClient,
"master-a.example.com", password="pw")
# No password
self.assertRaises(client.Error, client.GanetiRapiClient,
"master-b.example.com", username="user")
def testCertVerifyInvalidCombinations(self):
self.assertRaises(client.Error, client.GenericCurlConfig,
use_curl_cabundle=True, cafile="cert1.pem")
self.assertRaises(client.Error, client.GenericCurlConfig,
use_curl_cabundle=True, capath="certs/")
self.assertRaises(client.Error, client.GenericCurlConfig,
use_curl_cabundle=True,
cafile="cert1.pem", capath="certs/")
def testProxySignalVerifyHostname(self):
for use_gnutls in [False, True]:
if use_gnutls:
pcverfn = _FakeGnuTlsPycurlVersion
else:
pcverfn = _FakeOpenSslPycurlVersion
for proxy in ["", "http://127.0.0.1:1234"]:
for use_signal in [False, True]:
for verify_hostname in [False, True]:
cfgfn = client.GenericCurlConfig(proxy=proxy, use_signal=use_signal,
verify_hostname=verify_hostname,
_pycurl_version_fn=pcverfn)
curl_factory = lambda: rapi.testutils.FakeCurl(RapiMock())
cl = client.GanetiRapiClient("master.example.com",
curl_config_fn=cfgfn,
curl_factory=curl_factory)
curl = cl._CreateCurl()
self.assertEqual(curl.getopt(pycurl.PROXY), proxy)
self.assertEqual(curl.getopt(pycurl.NOSIGNAL), not use_signal)
if verify_hostname:
self.assertEqual(curl.getopt(pycurl.SSL_VERIFYHOST), 2)
else:
self.assertEqual(curl.getopt(pycurl.SSL_VERIFYHOST), 0)
def testNoCertVerify(self):
cfgfn = client.GenericCurlConfig()
curl_factory = lambda: rapi.testutils.FakeCurl(RapiMock())
cl = client.GanetiRapiClient("master.example.com", curl_config_fn=cfgfn,
curl_factory=curl_factory)
curl = cl._CreateCurl()
self.assertFalse(curl.getopt(pycurl.SSL_VERIFYPEER))
self.assertFalse(curl.getopt(pycurl.CAINFO))
self.assertFalse(curl.getopt(pycurl.CAPATH))
def testCertVerifyCurlBundle(self):
cfgfn = client.GenericCurlConfig(use_curl_cabundle=True)
curl_factory = lambda: rapi.testutils.FakeCurl(RapiMock())
cl = client.GanetiRapiClient("master.example.com", curl_config_fn=cfgfn,
curl_factory=curl_factory)
curl = cl._CreateCurl()
self.assert_(curl.getopt(pycurl.SSL_VERIFYPEER))
self.assertFalse(curl.getopt(pycurl.CAINFO))
self.assertFalse(curl.getopt(pycurl.CAPATH))
def testCertVerifyCafile(self):
mycert = "/tmp/some/UNUSED/cert/file.pem"
cfgfn = client.GenericCurlConfig(cafile=mycert)
curl_factory = lambda: rapi.testutils.FakeCurl(RapiMock())
cl = client.GanetiRapiClient("master.example.com", curl_config_fn=cfgfn,
curl_factory=curl_factory)
curl = cl._CreateCurl()
self.assert_(curl.getopt(pycurl.SSL_VERIFYPEER))
self.assertEqual(curl.getopt(pycurl.CAINFO), mycert)
self.assertFalse(curl.getopt(pycurl.CAPATH))
def testCertVerifyCapath(self):
certdir = "/tmp/some/UNUSED/cert/directory"
pcverfn = _FakeOpenSslPycurlVersion
cfgfn = client.GenericCurlConfig(capath=certdir,
_pycurl_version_fn=pcverfn)
curl_factory = lambda: rapi.testutils.FakeCurl(RapiMock())
cl = client.GanetiRapiClient("master.example.com", curl_config_fn=cfgfn,
curl_factory=curl_factory)
curl = cl._CreateCurl()
self.assert_(curl.getopt(pycurl.SSL_VERIFYPEER))
self.assertEqual(curl.getopt(pycurl.CAPATH), certdir)
self.assertFalse(curl.getopt(pycurl.CAINFO))
def testCertVerifyCapathGnuTls(self):
certdir = "/tmp/some/UNUSED/cert/directory"
pcverfn = _FakeGnuTlsPycurlVersion
cfgfn = client.GenericCurlConfig(capath=certdir,
_pycurl_version_fn=pcverfn)
curl_factory = lambda: rapi.testutils.FakeCurl(RapiMock())
cl = client.GanetiRapiClient("master.example.com", curl_config_fn=cfgfn,
curl_factory=curl_factory)
self.assertRaises(client.Error, cl._CreateCurl)
def testCertVerifyNoSsl(self):
certdir = "/tmp/some/UNUSED/cert/directory"
pcverfn = _FakeNoSslPycurlVersion
cfgfn = client.GenericCurlConfig(capath=certdir,
_pycurl_version_fn=pcverfn)
curl_factory = lambda: rapi.testutils.FakeCurl(RapiMock())
cl = client.GanetiRapiClient("master.example.com", curl_config_fn=cfgfn,
curl_factory=curl_factory)
self.assertRaises(client.Error, cl._CreateCurl)
def testCertVerifyFancySsl(self):
certdir = "/tmp/some/UNUSED/cert/directory"
pcverfn = _FakeFancySslPycurlVersion
cfgfn = client.GenericCurlConfig(capath=certdir,
_pycurl_version_fn=pcverfn)
curl_factory = lambda: rapi.testutils.FakeCurl(RapiMock())
cl = client.GanetiRapiClient("master.example.com", curl_config_fn=cfgfn,
curl_factory=curl_factory)
self.assertRaises(NotImplementedError, cl._CreateCurl)
  def testTimeouts(self):
for connect_timeout in [None, 1, 5, 10, 30, 60, 300]:
for timeout in [None, 1, 30, 60, 3600, 24 * 3600]:
cfgfn = client.GenericCurlConfig(connect_timeout=connect_timeout,
timeout=timeout)
curl_factory = lambda: rapi.testutils.FakeCurl(RapiMock())
cl = client.GanetiRapiClient("master.example.com", curl_config_fn=cfgfn,
curl_factory=curl_factory)
curl = cl._CreateCurl()
self.assertEqual(curl.getopt(pycurl.CONNECTTIMEOUT), connect_timeout)
self.assertEqual(curl.getopt(pycurl.TIMEOUT), timeout)
class GanetiRapiClientTests(testutils.GanetiTestCase):
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.rapi = RapiMock()
self.curl = rapi.testutils.FakeCurl(self.rapi)
self.client = client.GanetiRapiClient("master.example.com",
curl_factory=lambda: self.curl)
def assertHandler(self, handler_cls):
self.failUnless(isinstance(self.rapi.GetLastHandler(), handler_cls))
def assertQuery(self, key, value):
self.assertEqual(value, self.rapi.GetLastHandler().queryargs.get(key, None))
def assertItems(self, items):
self.assertEqual(items, self.rapi.GetLastHandler().items)
def assertBulk(self):
self.assertTrue(self.rapi.GetLastHandler().useBulk())
def assertDryRun(self):
self.assertTrue(self.rapi.GetLastHandler().dryRun())
def assertUseForce(self):
self.assertTrue(self.rapi.GetLastHandler().useForce())
def testEncodeQuery(self):
query = [
("a", None),
("b", 1),
("c", 2),
("d", "Foo"),
("e", True),
]
expected = [
("a", ""),
("b", 1),
("c", 2),
("d", "Foo"),
("e", 1),
]
self.assertEqualValues(self.client._EncodeQuery(query),
expected)
# invalid types
for i in [[1, 2, 3], {"moo": "boo"}, (1, 2, 3)]:
self.assertRaises(ValueError, self.client._EncodeQuery, [("x", i)])
def testCurlSettings(self):
self.rapi.AddResponse("2")
self.assertEqual(2, self.client.GetVersion())
self.assertHandler(rlib2.R_version)
# Signals should be disabled by default
self.assert_(self.curl.getopt(pycurl.NOSIGNAL))
# No auth and no proxy
self.assertFalse(self.curl.getopt(pycurl.USERPWD))
self.assert_(self.curl.getopt(pycurl.PROXY) is None)
# Content-type is required for requests
headers = self.curl.getopt(pycurl.HTTPHEADER)
self.assert_("Content-type: application/json" in headers)
def testHttpError(self):
self.rapi.AddResponse(None, code=404)
try:
self.client.GetJobStatus(15140)
except client.GanetiApiError, err:
self.assertEqual(err.code, 404)
else:
self.fail("Didn't raise exception")
def testGetVersion(self):
self.rapi.AddResponse("2")
self.assertEqual(2, self.client.GetVersion())
self.assertHandler(rlib2.R_version)
def testGetFeatures(self):
for features in [[], ["foo", "bar", "baz"]]:
self.rapi.AddResponse(serializer.DumpJson(features))
self.assertEqual(features, self.client.GetFeatures())
self.assertHandler(rlib2.R_2_features)
def testGetFeaturesNotFound(self):
self.rapi.AddResponse(None, code=404)
self.assertEqual([], self.client.GetFeatures())
def testGetOperatingSystems(self):
self.rapi.AddResponse("[\"beos\"]")
self.assertEqual(["beos"], self.client.GetOperatingSystems())
self.assertHandler(rlib2.R_2_os)
def testGetClusterTags(self):
self.rapi.AddResponse("[\"tag\"]")
self.assertEqual(["tag"], self.client.GetClusterTags())
self.assertHandler(rlib2.R_2_tags)
def testAddClusterTags(self):
self.rapi.AddResponse("1234")
self.assertEqual(1234,
self.client.AddClusterTags(["awesome"], dry_run=True))
self.assertHandler(rlib2.R_2_tags)
self.assertDryRun()
self.assertQuery("tag", ["awesome"])
def testDeleteClusterTags(self):
self.rapi.AddResponse("5107")
self.assertEqual(5107, self.client.DeleteClusterTags(["awesome"],
dry_run=True))
self.assertHandler(rlib2.R_2_tags)
self.assertDryRun()
self.assertQuery("tag", ["awesome"])
def testGetInfo(self):
self.rapi.AddResponse("{}")
self.assertEqual({}, self.client.GetInfo())
self.assertHandler(rlib2.R_2_info)
def testGetInstances(self):
self.rapi.AddResponse("[]")
self.assertEqual([], self.client.GetInstances(bulk=True))
self.assertHandler(rlib2.R_2_instances)
self.assertBulk()
def testGetInstance(self):
self.rapi.AddResponse("[]")
self.assertEqual([], self.client.GetInstance("instance"))
self.assertHandler(rlib2.R_2_instances_name)
self.assertItems(["instance"])
def testGetInstanceInfo(self):
self.rapi.AddResponse("21291")
self.assertEqual(21291, self.client.GetInstanceInfo("inst3"))
self.assertHandler(rlib2.R_2_instances_name_info)
self.assertItems(["inst3"])
self.assertQuery("static", None)
self.rapi.AddResponse("3428")
self.assertEqual(3428, self.client.GetInstanceInfo("inst31", static=False))
self.assertHandler(rlib2.R_2_instances_name_info)
self.assertItems(["inst31"])
self.assertQuery("static", ["0"])
self.rapi.AddResponse("15665")
self.assertEqual(15665, self.client.GetInstanceInfo("inst32", static=True))
self.assertHandler(rlib2.R_2_instances_name_info)
self.assertItems(["inst32"])
self.assertQuery("static", ["1"])
def testInstancesMultiAlloc(self):
response = {
constants.JOB_IDS_KEY: ["23423"],
constants.ALLOCATABLE_KEY: ["foobar"],
constants.FAILED_KEY: ["foobar2"],
}
self.rapi.AddResponse(serializer.DumpJson(response))
insts = [self.client.InstanceAllocation("create", "foobar",
"plain", [], []),
self.client.InstanceAllocation("create", "foobar2",
"drbd8", [{"size": 100}], [])]
resp = self.client.InstancesMultiAlloc(insts)
self.assertEqual(resp, response)
self.assertHandler(rlib2.R_2_instances_multi_alloc)
def testCreateInstanceOldVersion(self):
# The old request format, version 0, is no longer supported
self.rapi.AddResponse(None, code=404)
self.assertRaises(client.GanetiApiError, self.client.CreateInstance,
"create", "inst1.example.com", "plain", [], [])
self.assertEqual(self.rapi.CountPending(), 0)
def testCreateInstance(self):
self.rapi.AddResponse(serializer.DumpJson([rlib2._INST_CREATE_REQV1]))
self.rapi.AddResponse("23030")
job_id = self.client.CreateInstance("create", "inst1.example.com",
"plain", [], [], dry_run=True)
self.assertEqual(job_id, 23030)
self.assertHandler(rlib2.R_2_instances)
self.assertDryRun()
data = serializer.LoadJson(self.rapi.GetLastRequestData())
for field in ["dry_run", "beparams", "hvparams", "start"]:
self.assertFalse(field in data)
self.assertEqual(data["name"], "inst1.example.com")
self.assertEqual(data["disk_template"], "plain")
def testCreateInstance2(self):
self.rapi.AddResponse(serializer.DumpJson([rlib2._INST_CREATE_REQV1]))
self.rapi.AddResponse("24740")
job_id = self.client.CreateInstance("import", "inst2.example.com",
"drbd8", [{"size": 100,}],
[{}, {"bridge": "br1", }],
dry_run=False, start=True,
pnode="node1", snode="node9",
ip_check=False)
self.assertEqual(job_id, 24740)
self.assertHandler(rlib2.R_2_instances)
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqual(data[rlib2._REQ_DATA_VERSION], 1)
self.assertEqual(data["name"], "inst2.example.com")
self.assertEqual(data["disk_template"], "drbd8")
self.assertEqual(data["start"], True)
self.assertEqual(data["ip_check"], False)
self.assertEqualValues(data["disks"], [{"size": 100,}])
self.assertEqualValues(data["nics"], [{}, {"bridge": "br1", }])
def testDeleteInstance(self):
self.rapi.AddResponse("1234")
self.assertEqual(1234, self.client.DeleteInstance("instance", dry_run=True))
self.assertHandler(rlib2.R_2_instances_name)
self.assertItems(["instance"])
self.assertDryRun()
def testGetInstanceTags(self):
self.rapi.AddResponse("[]")
self.assertEqual([], self.client.GetInstanceTags("fooinstance"))
self.assertHandler(rlib2.R_2_instances_name_tags)
self.assertItems(["fooinstance"])
def testAddInstanceTags(self):
self.rapi.AddResponse("1234")
self.assertEqual(1234,
self.client.AddInstanceTags("fooinstance", ["awesome"], dry_run=True))
self.assertHandler(rlib2.R_2_instances_name_tags)
self.assertItems(["fooinstance"])
self.assertDryRun()
self.assertQuery("tag", ["awesome"])
def testDeleteInstanceTags(self):
self.rapi.AddResponse("25826")
self.assertEqual(25826, self.client.DeleteInstanceTags("foo", ["awesome"],
dry_run=True))
self.assertHandler(rlib2.R_2_instances_name_tags)
self.assertItems(["foo"])
self.assertDryRun()
self.assertQuery("tag", ["awesome"])
def testRebootInstance(self):
self.rapi.AddResponse("6146")
job_id = self.client.RebootInstance("i-bar", reboot_type="hard",
ignore_secondaries=True, dry_run=True,
reason="Updates")
self.assertEqual(6146, job_id)
self.assertHandler(rlib2.R_2_instances_name_reboot)
self.assertItems(["i-bar"])
self.assertDryRun()
self.assertQuery("type", ["hard"])
self.assertQuery("ignore_secondaries", ["1"])
self.assertQuery("reason", ["Updates"])
def testRebootInstanceDefaultReason(self):
self.rapi.AddResponse("6146")
job_id = self.client.RebootInstance("i-bar", reboot_type="hard",
ignore_secondaries=True, dry_run=True)
self.assertEqual(6146, job_id)
self.assertHandler(rlib2.R_2_instances_name_reboot)
self.assertItems(["i-bar"])
self.assertDryRun()
self.assertQuery("type", ["hard"])
self.assertQuery("ignore_secondaries", ["1"])
self.assertQuery("reason", None)
def testShutdownInstance(self):
self.rapi.AddResponse("1487")
self.assertEqual(1487, self.client.ShutdownInstance("foo-instance",
dry_run=True,
reason="NoMore"))
self.assertHandler(rlib2.R_2_instances_name_shutdown)
self.assertItems(["foo-instance"])
self.assertDryRun()
self.assertQuery("reason", ["NoMore"])
def testShutdownInstanceDefaultReason(self):
self.rapi.AddResponse("1487")
self.assertEqual(1487, self.client.ShutdownInstance("foo-instance",<|fim▁hole|> dry_run=True))
self.assertHandler(rlib2.R_2_instances_name_shutdown)
self.assertItems(["foo-instance"])
self.assertDryRun()
self.assertQuery("reason", None)
def testStartupInstance(self):
self.rapi.AddResponse("27149")
self.assertEqual(27149, self.client.StartupInstance("bar-instance",
dry_run=True,
reason="New"))
self.assertHandler(rlib2.R_2_instances_name_startup)
self.assertItems(["bar-instance"])
self.assertDryRun()
self.assertQuery("reason", ["New"])
def testStartupInstanceDefaultReason(self):
self.rapi.AddResponse("27149")
self.assertEqual(27149, self.client.StartupInstance("bar-instance",
dry_run=True))
self.assertHandler(rlib2.R_2_instances_name_startup)
self.assertItems(["bar-instance"])
self.assertDryRun()
self.assertQuery("reason", None)
def testReinstallInstance(self):
self.rapi.AddResponse(serializer.DumpJson([]))
self.rapi.AddResponse("19119")
self.assertEqual(19119, self.client.ReinstallInstance("baz-instance",
os="DOS",
no_startup=True))
self.assertHandler(rlib2.R_2_instances_name_reinstall)
self.assertItems(["baz-instance"])
self.assertQuery("os", ["DOS"])
self.assertQuery("nostartup", ["1"])
self.assertEqual(self.rapi.CountPending(), 0)
def testReinstallInstanceNew(self):
self.rapi.AddResponse(serializer.DumpJson([rlib2._INST_REINSTALL_REQV1]))
self.rapi.AddResponse("25689")
self.assertEqual(25689, self.client.ReinstallInstance("moo-instance",
os="Debian",
no_startup=True))
self.assertHandler(rlib2.R_2_instances_name_reinstall)
self.assertItems(["moo-instance"])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqual(len(data), 2)
self.assertEqual(data["os"], "Debian")
self.assertEqual(data["start"], False)
self.assertEqual(self.rapi.CountPending(), 0)
def testReinstallInstanceWithOsparams1(self):
self.rapi.AddResponse(serializer.DumpJson([]))
self.assertRaises(client.GanetiApiError, self.client.ReinstallInstance,
"doo-instance", osparams={"x": "y"})
self.assertEqual(self.rapi.CountPending(), 0)
def testReinstallInstanceWithOsparams2(self):
osparams = {
"Hello": "World",
"foo": "bar",
}
self.rapi.AddResponse(serializer.DumpJson([rlib2._INST_REINSTALL_REQV1]))
self.rapi.AddResponse("1717")
self.assertEqual(1717, self.client.ReinstallInstance("zoo-instance",
osparams=osparams))
self.assertHandler(rlib2.R_2_instances_name_reinstall)
self.assertItems(["zoo-instance"])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqual(len(data), 2)
self.assertEqual(data["osparams"], osparams)
self.assertEqual(data["start"], True)
self.assertEqual(self.rapi.CountPending(), 0)
def testReplaceInstanceDisks(self):
self.rapi.AddResponse("999")
job_id = self.client.ReplaceInstanceDisks("instance-name",
disks=[0, 1], iallocator="hail")
self.assertEqual(999, job_id)
self.assertHandler(rlib2.R_2_instances_name_replace_disks)
self.assertItems(["instance-name"])
self.assertQuery("disks", ["0,1"])
self.assertQuery("mode", ["replace_auto"])
self.assertQuery("iallocator", ["hail"])
self.rapi.AddResponse("1000")
job_id = self.client.ReplaceInstanceDisks("instance-bar",
disks=[1], mode="replace_on_secondary", remote_node="foo-node")
self.assertEqual(1000, job_id)
self.assertItems(["instance-bar"])
self.assertQuery("disks", ["1"])
self.assertQuery("remote_node", ["foo-node"])
self.rapi.AddResponse("5175")
self.assertEqual(5175, self.client.ReplaceInstanceDisks("instance-moo"))
self.assertItems(["instance-moo"])
self.assertQuery("disks", None)
def testPrepareExport(self):
self.rapi.AddResponse("8326")
self.assertEqual(8326, self.client.PrepareExport("inst1", "local"))
self.assertHandler(rlib2.R_2_instances_name_prepare_export)
self.assertItems(["inst1"])
self.assertQuery("mode", ["local"])
def testExportInstance(self):
self.rapi.AddResponse("19695")
job_id = self.client.ExportInstance("inst2", "local", "nodeX",
shutdown=True)
self.assertEqual(job_id, 19695)
self.assertHandler(rlib2.R_2_instances_name_export)
self.assertItems(["inst2"])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqual(data["mode"], "local")
self.assertEqual(data["destination"], "nodeX")
self.assertEqual(data["shutdown"], True)
def testMigrateInstanceDefaults(self):
self.rapi.AddResponse("24873")
job_id = self.client.MigrateInstance("inst91")
self.assertEqual(job_id, 24873)
self.assertHandler(rlib2.R_2_instances_name_migrate)
self.assertItems(["inst91"])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertFalse(data)
def testMigrateInstance(self):
for mode in constants.HT_MIGRATION_MODES:
for cleanup in [False, True]:
self.rapi.AddResponse("31910")
job_id = self.client.MigrateInstance("inst289", mode=mode,
cleanup=cleanup)
self.assertEqual(job_id, 31910)
self.assertHandler(rlib2.R_2_instances_name_migrate)
self.assertItems(["inst289"])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqual(len(data), 2)
self.assertEqual(data["mode"], mode)
self.assertEqual(data["cleanup"], cleanup)
def testFailoverInstanceDefaults(self):
self.rapi.AddResponse("7639")
job_id = self.client.FailoverInstance("inst13579")
self.assertEqual(job_id, 7639)
self.assertHandler(rlib2.R_2_instances_name_failover)
self.assertItems(["inst13579"])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertFalse(data)
def testFailoverInstance(self):
for iallocator in ["dumb", "hail"]:
for ignore_consistency in [False, True]:
for target_node in ["node-a", "node2"]:
self.rapi.AddResponse("19161")
job_id = \
self.client.FailoverInstance("inst251", iallocator=iallocator,
ignore_consistency=ignore_consistency,
target_node=target_node)
self.assertEqual(job_id, 19161)
self.assertHandler(rlib2.R_2_instances_name_failover)
self.assertItems(["inst251"])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqual(len(data), 3)
self.assertEqual(data["iallocator"], iallocator)
self.assertEqual(data["ignore_consistency"], ignore_consistency)
self.assertEqual(data["target_node"], target_node)
self.assertEqual(self.rapi.CountPending(), 0)
def testRenameInstanceDefaults(self):
new_name = "newnametha7euqu"
self.rapi.AddResponse("8791")
job_id = self.client.RenameInstance("inst18821", new_name)
self.assertEqual(job_id, 8791)
self.assertHandler(rlib2.R_2_instances_name_rename)
self.assertItems(["inst18821"])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqualValues(data, {"new_name": new_name, })
def testRenameInstance(self):
new_name = "new-name-yiux1iin"
for ip_check in [False, True]:
for name_check in [False, True]:
self.rapi.AddResponse("24776")
job_id = self.client.RenameInstance("inst20967", new_name,
ip_check=ip_check,
name_check=name_check)
self.assertEqual(job_id, 24776)
self.assertHandler(rlib2.R_2_instances_name_rename)
self.assertItems(["inst20967"])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqual(len(data), 3)
self.assertEqual(data["new_name"], new_name)
self.assertEqual(data["ip_check"], ip_check)
self.assertEqual(data["name_check"], name_check)
def testGetJobs(self):
self.rapi.AddResponse('[ { "id": "123", "uri": "\\/2\\/jobs\\/123" },'
' { "id": "124", "uri": "\\/2\\/jobs\\/124" } ]')
self.assertEqual([123, 124], self.client.GetJobs())
self.assertHandler(rlib2.R_2_jobs)
self.rapi.AddResponse('[ { "id": "123", "uri": "\\/2\\/jobs\\/123" },'
' { "id": "124", "uri": "\\/2\\/jobs\\/124" } ]')
self.assertEqual([{"id": "123", "uri": "/2/jobs/123"},
{"id": "124", "uri": "/2/jobs/124"}],
self.client.GetJobs(bulk=True))
self.assertHandler(rlib2.R_2_jobs)
self.assertBulk()
def testGetJobStatus(self):
self.rapi.AddResponse("{\"foo\": \"bar\"}")
self.assertEqual({"foo": "bar"}, self.client.GetJobStatus(1234))
self.assertHandler(rlib2.R_2_jobs_id)
self.assertItems(["1234"])
def testWaitForJobChange(self):
fields = ["id", "summary"]
expected = {
"job_info": [123, "something"],
"log_entries": [],
}
self.rapi.AddResponse(serializer.DumpJson(expected))
result = self.client.WaitForJobChange(123, fields, [], -1)
self.assertEqualValues(expected, result)
self.assertHandler(rlib2.R_2_jobs_id_wait)
self.assertItems(["123"])
def testCancelJob(self):
self.rapi.AddResponse("[true, \"Job 123 will be canceled\"]")
self.assertEqual([True, "Job 123 will be canceled"],
self.client.CancelJob(999, dry_run=True))
self.assertHandler(rlib2.R_2_jobs_id)
self.assertItems(["999"])
self.assertDryRun()
def testGetNodes(self):
self.rapi.AddResponse("[ { \"id\": \"node1\", \"uri\": \"uri1\" },"
" { \"id\": \"node2\", \"uri\": \"uri2\" } ]")
self.assertEqual(["node1", "node2"], self.client.GetNodes())
self.assertHandler(rlib2.R_2_nodes)
self.rapi.AddResponse("[ { \"id\": \"node1\", \"uri\": \"uri1\" },"
" { \"id\": \"node2\", \"uri\": \"uri2\" } ]")
self.assertEqual([{"id": "node1", "uri": "uri1"},
{"id": "node2", "uri": "uri2"}],
self.client.GetNodes(bulk=True))
self.assertHandler(rlib2.R_2_nodes)
self.assertBulk()
def testGetNode(self):
self.rapi.AddResponse("{}")
self.assertEqual({}, self.client.GetNode("node-foo"))
self.assertHandler(rlib2.R_2_nodes_name)
self.assertItems(["node-foo"])
def testEvacuateNode(self):
self.rapi.AddResponse(serializer.DumpJson([rlib2._NODE_EVAC_RES1]))
self.rapi.AddResponse("9876")
job_id = self.client.EvacuateNode("node-1", remote_node="node-2")
self.assertEqual(9876, job_id)
self.assertHandler(rlib2.R_2_nodes_name_evacuate)
self.assertItems(["node-1"])
self.assertEqual(serializer.LoadJson(self.rapi.GetLastRequestData()),
{ "remote_node": "node-2", })
self.assertEqual(self.rapi.CountPending(), 0)
self.rapi.AddResponse(serializer.DumpJson([rlib2._NODE_EVAC_RES1]))
self.rapi.AddResponse("8888")
job_id = self.client.EvacuateNode("node-3", iallocator="hail", dry_run=True,
mode=constants.NODE_EVAC_ALL,
early_release=True)
self.assertEqual(8888, job_id)
self.assertItems(["node-3"])
self.assertEqual(serializer.LoadJson(self.rapi.GetLastRequestData()), {
"iallocator": "hail",
"mode": "all",
"early_release": True,
})
self.assertDryRun()
self.assertRaises(client.GanetiApiError,
self.client.EvacuateNode,
"node-4", iallocator="hail", remote_node="node-5")
self.assertEqual(self.rapi.CountPending(), 0)
def testEvacuateNodeOldResponse(self):
self.rapi.AddResponse(serializer.DumpJson([]))
self.assertRaises(client.GanetiApiError, self.client.EvacuateNode,
"node-4", accept_old=False)
self.assertEqual(self.rapi.CountPending(), 0)
for mode in [client.NODE_EVAC_PRI, client.NODE_EVAC_ALL]:
self.rapi.AddResponse(serializer.DumpJson([]))
self.assertRaises(client.GanetiApiError, self.client.EvacuateNode,
"node-4", accept_old=True, mode=mode)
self.assertEqual(self.rapi.CountPending(), 0)
self.rapi.AddResponse(serializer.DumpJson([]))
self.rapi.AddResponse(serializer.DumpJson("21533"))
result = self.client.EvacuateNode("node-3", iallocator="hail",
dry_run=True, accept_old=True,
mode=client.NODE_EVAC_SEC,
early_release=True)
self.assertEqual(result, "21533")
self.assertItems(["node-3"])
self.assertQuery("iallocator", ["hail"])
self.assertQuery("early_release", ["1"])
self.assertFalse(self.rapi.GetLastRequestData())
self.assertDryRun()
self.assertEqual(self.rapi.CountPending(), 0)
def testMigrateNode(self):
self.rapi.AddResponse(serializer.DumpJson([]))
self.rapi.AddResponse("1111")
self.assertEqual(1111, self.client.MigrateNode("node-a", dry_run=True))
self.assertHandler(rlib2.R_2_nodes_name_migrate)
self.assertItems(["node-a"])
self.assert_("mode" not in self.rapi.GetLastHandler().queryargs)
self.assertDryRun()
self.assertFalse(self.rapi.GetLastRequestData())
self.rapi.AddResponse(serializer.DumpJson([]))
self.rapi.AddResponse("1112")
self.assertEqual(1112, self.client.MigrateNode("node-a", dry_run=True,
mode="live"))
self.assertHandler(rlib2.R_2_nodes_name_migrate)
self.assertItems(["node-a"])
self.assertQuery("mode", ["live"])
self.assertDryRun()
self.assertFalse(self.rapi.GetLastRequestData())
self.rapi.AddResponse(serializer.DumpJson([]))
self.assertRaises(client.GanetiApiError, self.client.MigrateNode,
"node-c", target_node="foonode")
self.assertEqual(self.rapi.CountPending(), 0)
def testMigrateNodeBodyData(self):
self.rapi.AddResponse(serializer.DumpJson([rlib2._NODE_MIGRATE_REQV1]))
self.rapi.AddResponse("27539")
self.assertEqual(27539, self.client.MigrateNode("node-a", dry_run=False,
mode="live"))
self.assertHandler(rlib2.R_2_nodes_name_migrate)
self.assertItems(["node-a"])
self.assertFalse(self.rapi.GetLastHandler().queryargs)
self.assertEqual(serializer.LoadJson(self.rapi.GetLastRequestData()),
{ "mode": "live", })
self.rapi.AddResponse(serializer.DumpJson([rlib2._NODE_MIGRATE_REQV1]))
self.rapi.AddResponse("14219")
self.assertEqual(14219, self.client.MigrateNode("node-x", dry_run=True,
target_node="node9",
iallocator="ial"))
self.assertHandler(rlib2.R_2_nodes_name_migrate)
self.assertItems(["node-x"])
self.assertDryRun()
self.assertEqual(serializer.LoadJson(self.rapi.GetLastRequestData()),
{ "target_node": "node9", "iallocator": "ial", })
self.assertEqual(self.rapi.CountPending(), 0)
def testGetNodeRole(self):
self.rapi.AddResponse("\"master\"")
self.assertEqual("master", self.client.GetNodeRole("node-a"))
self.assertHandler(rlib2.R_2_nodes_name_role)
self.assertItems(["node-a"])
def testSetNodeRole(self):
self.rapi.AddResponse("789")
self.assertEqual(789,
self.client.SetNodeRole("node-foo", "master-candidate", force=True))
self.assertHandler(rlib2.R_2_nodes_name_role)
self.assertItems(["node-foo"])
self.assertQuery("force", ["1"])
self.assertEqual("\"master-candidate\"", self.rapi.GetLastRequestData())
def testPowercycleNode(self):
self.rapi.AddResponse("23051")
self.assertEqual(23051,
self.client.PowercycleNode("node5468", force=True))
self.assertHandler(rlib2.R_2_nodes_name_powercycle)
self.assertItems(["node5468"])
self.assertQuery("force", ["1"])
self.assertFalse(self.rapi.GetLastRequestData())
self.assertEqual(self.rapi.CountPending(), 0)
def testModifyNode(self):
self.rapi.AddResponse("3783")
job_id = self.client.ModifyNode("node16979.example.com", drained=True)
self.assertEqual(job_id, 3783)
self.assertHandler(rlib2.R_2_nodes_name_modify)
self.assertItems(["node16979.example.com"])
self.assertEqual(self.rapi.CountPending(), 0)
def testGetNodeStorageUnits(self):
self.rapi.AddResponse("42")
self.assertEqual(42,
self.client.GetNodeStorageUnits("node-x", "lvm-pv", "fields"))
self.assertHandler(rlib2.R_2_nodes_name_storage)
self.assertItems(["node-x"])
self.assertQuery("storage_type", ["lvm-pv"])
self.assertQuery("output_fields", ["fields"])
def testModifyNodeStorageUnits(self):
self.rapi.AddResponse("14")
self.assertEqual(14,
self.client.ModifyNodeStorageUnits("node-z", "lvm-pv", "hda"))
self.assertHandler(rlib2.R_2_nodes_name_storage_modify)
self.assertItems(["node-z"])
self.assertQuery("storage_type", ["lvm-pv"])
self.assertQuery("name", ["hda"])
self.assertQuery("allocatable", None)
for allocatable, query_allocatable in [(True, "1"), (False, "0")]:
self.rapi.AddResponse("7205")
job_id = self.client.ModifyNodeStorageUnits("node-z", "lvm-pv", "hda",
allocatable=allocatable)
self.assertEqual(7205, job_id)
self.assertHandler(rlib2.R_2_nodes_name_storage_modify)
self.assertItems(["node-z"])
self.assertQuery("storage_type", ["lvm-pv"])
self.assertQuery("name", ["hda"])
self.assertQuery("allocatable", [query_allocatable])
def testRepairNodeStorageUnits(self):
self.rapi.AddResponse("99")
self.assertEqual(99, self.client.RepairNodeStorageUnits("node-z", "lvm-pv",
"hda"))
self.assertHandler(rlib2.R_2_nodes_name_storage_repair)
self.assertItems(["node-z"])
self.assertQuery("storage_type", ["lvm-pv"])
self.assertQuery("name", ["hda"])
def testGetNodeTags(self):
self.rapi.AddResponse("[\"fry\", \"bender\"]")
self.assertEqual(["fry", "bender"], self.client.GetNodeTags("node-k"))
self.assertHandler(rlib2.R_2_nodes_name_tags)
self.assertItems(["node-k"])
def testAddNodeTags(self):
self.rapi.AddResponse("1234")
self.assertEqual(1234,
self.client.AddNodeTags("node-v", ["awesome"], dry_run=True))
self.assertHandler(rlib2.R_2_nodes_name_tags)
self.assertItems(["node-v"])
self.assertDryRun()
self.assertQuery("tag", ["awesome"])
def testDeleteNodeTags(self):
self.rapi.AddResponse("16861")
self.assertEqual(16861, self.client.DeleteNodeTags("node-w", ["awesome"],
dry_run=True))
self.assertHandler(rlib2.R_2_nodes_name_tags)
self.assertItems(["node-w"])
self.assertDryRun()
self.assertQuery("tag", ["awesome"])
def testGetGroups(self):
groups = [{"name": "group1",
"uri": "/2/groups/group1",
},
{"name": "group2",
"uri": "/2/groups/group2",
},
]
self.rapi.AddResponse(serializer.DumpJson(groups))
self.assertEqual(["group1", "group2"], self.client.GetGroups())
self.assertHandler(rlib2.R_2_groups)
def testGetGroupsBulk(self):
groups = [{"name": "group1",
"uri": "/2/groups/group1",
"node_cnt": 2,
"node_list": ["gnt1.test",
"gnt2.test",
],
},
{"name": "group2",
"uri": "/2/groups/group2",
"node_cnt": 1,
"node_list": ["gnt3.test",
],
},
]
self.rapi.AddResponse(serializer.DumpJson(groups))
self.assertEqual(groups, self.client.GetGroups(bulk=True))
self.assertHandler(rlib2.R_2_groups)
self.assertBulk()
def testGetGroup(self):
group = {"ctime": None,
"name": "default",
}
self.rapi.AddResponse(serializer.DumpJson(group))
self.assertEqual({"ctime": None, "name": "default"},
self.client.GetGroup("default"))
self.assertHandler(rlib2.R_2_groups_name)
self.assertItems(["default"])
def testCreateGroup(self):
self.rapi.AddResponse("12345")
job_id = self.client.CreateGroup("newgroup", dry_run=True)
self.assertEqual(job_id, 12345)
self.assertHandler(rlib2.R_2_groups)
self.assertDryRun()
def testDeleteGroup(self):
self.rapi.AddResponse("12346")
job_id = self.client.DeleteGroup("newgroup", dry_run=True)
self.assertEqual(job_id, 12346)
self.assertHandler(rlib2.R_2_groups_name)
self.assertDryRun()
def testRenameGroup(self):
self.rapi.AddResponse("12347")
job_id = self.client.RenameGroup("oldname", "newname")
self.assertEqual(job_id, 12347)
self.assertHandler(rlib2.R_2_groups_name_rename)
def testModifyGroup(self):
self.rapi.AddResponse("12348")
job_id = self.client.ModifyGroup("mygroup", alloc_policy="foo")
self.assertEqual(job_id, 12348)
self.assertHandler(rlib2.R_2_groups_name_modify)
def testAssignGroupNodes(self):
self.rapi.AddResponse("12349")
job_id = self.client.AssignGroupNodes("mygroup", ["node1", "node2"],
force=True, dry_run=True)
self.assertEqual(job_id, 12349)
self.assertHandler(rlib2.R_2_groups_name_assign_nodes)
self.assertDryRun()
self.assertUseForce()
def testGetNetworksBulk(self):
networks = [{"name": "network1",
"uri": "/2/networks/network1",
"network": "192.168.0.0/24",
},
{"name": "network2",
"uri": "/2/networks/network2",
"network": "192.168.0.0/24",
},
]
self.rapi.AddResponse(serializer.DumpJson(networks))
self.assertEqual(networks, self.client.GetNetworks(bulk=True))
self.assertHandler(rlib2.R_2_networks)
self.assertBulk()
def testGetNetwork(self):
network = {"ctime": None,
"name": "network1",
}
self.rapi.AddResponse(serializer.DumpJson(network))
self.assertEqual({"ctime": None, "name": "network1"},
self.client.GetNetwork("network1"))
self.assertHandler(rlib2.R_2_networks_name)
self.assertItems(["network1"])
def testCreateNetwork(self):
self.rapi.AddResponse("12345")
job_id = self.client.CreateNetwork("newnetwork", network="192.168.0.0/24",
dry_run=True)
self.assertEqual(job_id, 12345)
self.assertHandler(rlib2.R_2_networks)
self.assertDryRun()
def testModifyNetwork(self):
self.rapi.AddResponse("12346")
job_id = self.client.ModifyNetwork("mynetwork", gateway="192.168.0.10",
dry_run=True)
self.assertEqual(job_id, 12346)
self.assertHandler(rlib2.R_2_networks_name_modify)
def testDeleteNetwork(self):
self.rapi.AddResponse("12347")
job_id = self.client.DeleteNetwork("newnetwork", dry_run=True)
self.assertEqual(job_id, 12347)
self.assertHandler(rlib2.R_2_networks_name)
self.assertDryRun()
def testConnectNetwork(self):
self.rapi.AddResponse("12348")
job_id = self.client.ConnectNetwork("mynetwork", "default",
"bridged", "br0", dry_run=True)
self.assertEqual(job_id, 12348)
self.assertHandler(rlib2.R_2_networks_name_connect)
self.assertDryRun()
def testDisconnectNetwork(self):
self.rapi.AddResponse("12349")
job_id = self.client.DisconnectNetwork("mynetwork", "default", dry_run=True)
self.assertEqual(job_id, 12349)
self.assertHandler(rlib2.R_2_networks_name_disconnect)
self.assertDryRun()
def testGetNetworkTags(self):
self.rapi.AddResponse("[]")
self.assertEqual([], self.client.GetNetworkTags("fooNetwork"))
self.assertHandler(rlib2.R_2_networks_name_tags)
self.assertItems(["fooNetwork"])
def testAddNetworkTags(self):
self.rapi.AddResponse("1234")
self.assertEqual(1234,
self.client.AddNetworkTags("fooNetwork", ["awesome"], dry_run=True))
self.assertHandler(rlib2.R_2_networks_name_tags)
self.assertItems(["fooNetwork"])
self.assertDryRun()
self.assertQuery("tag", ["awesome"])
def testDeleteNetworkTags(self):
self.rapi.AddResponse("25826")
self.assertEqual(25826, self.client.DeleteNetworkTags("foo", ["awesome"],
dry_run=True))
self.assertHandler(rlib2.R_2_networks_name_tags)
self.assertItems(["foo"])
self.assertDryRun()
self.assertQuery("tag", ["awesome"])
def testModifyInstance(self):
self.rapi.AddResponse("23681")
job_id = self.client.ModifyInstance("inst7210", os_name="linux")
self.assertEqual(job_id, 23681)
self.assertItems(["inst7210"])
self.assertHandler(rlib2.R_2_instances_name_modify)
self.assertEqual(serializer.LoadJson(self.rapi.GetLastRequestData()),
{ "os_name": "linux", })
def testModifyCluster(self):
for mnh in [None, False, True]:
self.rapi.AddResponse("14470")
self.assertEqual(14470,
self.client.ModifyCluster(maintain_node_health=mnh))
self.assertHandler(rlib2.R_2_cluster_modify)
self.assertItems([])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqual(len(data), 1)
self.assertEqual(data["maintain_node_health"], mnh)
self.assertEqual(self.rapi.CountPending(), 0)
def testRedistributeConfig(self):
self.rapi.AddResponse("3364")
job_id = self.client.RedistributeConfig()
self.assertEqual(job_id, 3364)
self.assertItems([])
self.assertHandler(rlib2.R_2_redist_config)
def testActivateInstanceDisks(self):
self.rapi.AddResponse("23547")
job_id = self.client.ActivateInstanceDisks("inst28204")
self.assertEqual(job_id, 23547)
self.assertItems(["inst28204"])
self.assertHandler(rlib2.R_2_instances_name_activate_disks)
self.assertFalse(self.rapi.GetLastHandler().queryargs)
def testActivateInstanceDisksIgnoreSize(self):
self.rapi.AddResponse("11044")
job_id = self.client.ActivateInstanceDisks("inst28204", ignore_size=True)
self.assertEqual(job_id, 11044)
self.assertItems(["inst28204"])
self.assertHandler(rlib2.R_2_instances_name_activate_disks)
self.assertQuery("ignore_size", ["1"])
def testDeactivateInstanceDisks(self):
self.rapi.AddResponse("14591")
job_id = self.client.DeactivateInstanceDisks("inst28234")
self.assertEqual(job_id, 14591)
self.assertItems(["inst28234"])
self.assertHandler(rlib2.R_2_instances_name_deactivate_disks)
self.assertFalse(self.rapi.GetLastHandler().queryargs)
def testRecreateInstanceDisks(self):
self.rapi.AddResponse("13553")
job_id = self.client.RecreateInstanceDisks("inst23153")
self.assertEqual(job_id, 13553)
self.assertItems(["inst23153"])
self.assertHandler(rlib2.R_2_instances_name_recreate_disks)
self.assertFalse(self.rapi.GetLastHandler().queryargs)
def testGetInstanceConsole(self):
self.rapi.AddResponse("26876")
job_id = self.client.GetInstanceConsole("inst21491")
self.assertEqual(job_id, 26876)
self.assertItems(["inst21491"])
self.assertHandler(rlib2.R_2_instances_name_console)
self.assertFalse(self.rapi.GetLastHandler().queryargs)
self.assertFalse(self.rapi.GetLastRequestData())
def testGrowInstanceDisk(self):
for idx, wait_for_sync in enumerate([None, False, True]):
amount = 128 + (512 * idx)
self.assertEqual(self.rapi.CountPending(), 0)
self.rapi.AddResponse("30783")
self.assertEqual(30783,
self.client.GrowInstanceDisk("eze8ch", idx, amount,
wait_for_sync=wait_for_sync))
self.assertHandler(rlib2.R_2_instances_name_disk_grow)
self.assertItems(["eze8ch", str(idx)])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
if wait_for_sync is None:
self.assertEqual(len(data), 1)
self.assert_("wait_for_sync" not in data)
else:
self.assertEqual(len(data), 2)
self.assertEqual(data["wait_for_sync"], wait_for_sync)
self.assertEqual(data["amount"], amount)
self.assertEqual(self.rapi.CountPending(), 0)
def testGetGroupTags(self):
self.rapi.AddResponse("[]")
self.assertEqual([], self.client.GetGroupTags("fooGroup"))
self.assertHandler(rlib2.R_2_groups_name_tags)
self.assertItems(["fooGroup"])
def testAddGroupTags(self):
self.rapi.AddResponse("1234")
self.assertEqual(1234,
self.client.AddGroupTags("fooGroup", ["awesome"], dry_run=True))
self.assertHandler(rlib2.R_2_groups_name_tags)
self.assertItems(["fooGroup"])
self.assertDryRun()
self.assertQuery("tag", ["awesome"])
def testDeleteGroupTags(self):
self.rapi.AddResponse("25826")
self.assertEqual(25826, self.client.DeleteGroupTags("foo", ["awesome"],
dry_run=True))
self.assertHandler(rlib2.R_2_groups_name_tags)
self.assertItems(["foo"])
self.assertDryRun()
self.assertQuery("tag", ["awesome"])
def testQuery(self):
for idx, what in enumerate(constants.QR_VIA_RAPI):
for idx2, qfilter in enumerate([None, ["?", "name"]]):
job_id = 11010 + (idx << 4) + (idx2 << 16)
fields = sorted(query.ALL_FIELDS[what].keys())[:10]
self.rapi.AddResponse(str(job_id))
self.assertEqual(self.client.Query(what, fields, qfilter=qfilter),
job_id)
self.assertItems([what])
self.assertHandler(rlib2.R_2_query)
self.assertFalse(self.rapi.GetLastHandler().queryargs)
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqual(data["fields"], fields)
if qfilter is None:
self.assertTrue("qfilter" not in data)
else:
self.assertEqual(data["qfilter"], qfilter)
self.assertEqual(self.rapi.CountPending(), 0)
def testQueryFields(self):
exp_result = objects.QueryFieldsResponse(fields=[
objects.QueryFieldDefinition(name="pnode", title="PNode",
kind=constants.QFT_NUMBER),
objects.QueryFieldDefinition(name="other", title="Other",
kind=constants.QFT_BOOL),
])
for what in constants.QR_VIA_RAPI:
for fields in [None, ["name", "_unknown_"], ["&", "?|"]]:
self.rapi.AddResponse(serializer.DumpJson(exp_result.ToDict()))
result = self.client.QueryFields(what, fields=fields)
self.assertItems([what])
self.assertHandler(rlib2.R_2_query_fields)
self.assertFalse(self.rapi.GetLastRequestData())
queryargs = self.rapi.GetLastHandler().queryargs
if fields is None:
self.assertFalse(queryargs)
else:
self.assertEqual(queryargs, {
"fields": [",".join(fields)],
})
self.assertEqual(objects.QueryFieldsResponse.FromDict(result).ToDict(),
exp_result.ToDict())
self.assertEqual(self.rapi.CountPending(), 0)
def testWaitForJobCompletionNoChange(self):
resp = serializer.DumpJson({
"status": constants.JOB_STATUS_WAITING,
})
for retries in [1, 5, 25]:
for _ in range(retries):
self.rapi.AddResponse(resp)
self.assertFalse(self.client.WaitForJobCompletion(22789, period=None,
retries=retries))
self.assertHandler(rlib2.R_2_jobs_id)
self.assertItems(["22789"])
self.assertEqual(self.rapi.CountPending(), 0)
def testWaitForJobCompletionAlreadyFinished(self):
self.rapi.AddResponse(serializer.DumpJson({
"status": constants.JOB_STATUS_SUCCESS,
}))
self.assertTrue(self.client.WaitForJobCompletion(22793, period=None,
retries=1))
self.assertHandler(rlib2.R_2_jobs_id)
self.assertItems(["22793"])
self.assertEqual(self.rapi.CountPending(), 0)
def testWaitForJobCompletionEmptyResponse(self):
self.rapi.AddResponse("{}")
self.assertFalse(self.client.WaitForJobCompletion(22793, period=None,
retries=10))
self.assertHandler(rlib2.R_2_jobs_id)
self.assertItems(["22793"])
self.assertEqual(self.rapi.CountPending(), 0)
def testWaitForJobCompletionOutOfRetries(self):
for retries in [3, 10, 21]:
for _ in range(retries):
self.rapi.AddResponse(serializer.DumpJson({
"status": constants.JOB_STATUS_RUNNING,
}))
self.assertFalse(self.client.WaitForJobCompletion(30948, period=None,
retries=retries - 1))
self.assertHandler(rlib2.R_2_jobs_id)
self.assertItems(["30948"])
self.assertEqual(self.rapi.CountPending(), 1)
self.rapi.ResetResponses()
def testWaitForJobCompletionSuccessAndFailure(self):
for retries in [1, 4, 13]:
for (success, end_status) in [(False, constants.JOB_STATUS_ERROR),
(True, constants.JOB_STATUS_SUCCESS)]:
for _ in range(retries):
self.rapi.AddResponse(serializer.DumpJson({
"status": constants.JOB_STATUS_RUNNING,
}))
self.rapi.AddResponse(serializer.DumpJson({
"status": end_status,
}))
result = self.client.WaitForJobCompletion(3187, period=None,
retries=retries + 1)
self.assertEqual(result, success)
self.assertHandler(rlib2.R_2_jobs_id)
self.assertItems(["3187"])
self.assertEqual(self.rapi.CountPending(), 0)
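# Illustrative client-side use of the polling API exercised above (sketch
# only; the instance name, period and retry count are hypothetical):
#
#   job_id = rapi_client.RebootInstance("inst1")
#   if rapi_client.WaitForJobCompletion(job_id, period=5, retries=12):
#     ...  # job reached JOB_STATUS_SUCCESS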
class RapiTestRunner(unittest.TextTestRunner):
def run(self, *args):
global _used_handlers
assert _used_handlers is None
_used_handlers = set()
try:
# Run actual tests
result = unittest.TextTestRunner.run(self, *args)
diff = (set(connector.CONNECTOR.values()) - _used_handlers -
_KNOWN_UNUSED)
if diff:
raise AssertionError("The following RAPI resources were not used by the"
" RAPI client: %r" % utils.CommaJoin(diff))
finally:
# Reset global variable
_used_handlers = None
return result
if __name__ == "__main__":
client.UsesRapiClient(testutils.GanetiTestProgram)(testRunner=RapiTestRunner)<|fim▁end|> | |
<|file_name|>Utils.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Meran - MERAN UNLP is an ILS (Integrated Library System) which provides Catalog,
# Circulation and User's Management. It's written in Perl, and uses Apache2
# Web-Server, MySQL database and Sphinx 2 indexing.
# Copyright (C) 2009-2013 Grupo de desarrollo de Meran CeSPI-UNLP
#
# This file is part of Meran.
#
# Meran is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Meran is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Meran. If not, see <http://www.gnu.org/licenses/>.
# encoding: utf-8
# Thomas Nagy, 2005 (ita)
"""
Utilities, the stable ones are the following:
* h_file: compute a unique value for a file (hash), it uses
the module fnv if it is installed (see waf/utils/fnv & http://code.google.com/p/waf/wiki/FAQ)
else, md5 (see the python docs)
For large projects (projects with more than 15000 files) or slow hard disks and filesystems (HFS)
it is possible to use a hashing based on the path and the size (may give broken cache results)
The method h_file MUST raise an OSError if the file is a folder
import stat
def h_file(filename):
st = os.stat(filename)
if stat.S_ISDIR(st[stat.ST_MODE]): raise IOError('not a file')
m = Utils.md5()
m.update(str(st.st_mtime))
m.update(str(st.st_size))
m.update(filename)
return m.digest()
To replace the function in your project, use something like this:
import Utils
Utils.h_file = h_file
* h_list
* h_fun
* get_term_cols
* ordered_dict
"""
import os, sys, imp, string, errno, traceback, inspect, re, shutil, datetime, gc
# In python 3.0 we can get rid of all this
try: from UserDict import UserDict
except ImportError: from collections import UserDict
if sys.hexversion >= 0x2060000 or os.name == 'java':
import subprocess as pproc
else:
import pproc
import Logs
from Constants import *
try:
from collections import deque
except ImportError:
class deque(list):
def popleft(self):
return self.pop(0)
is_win32 = sys.platform == 'win32'
try:
# defaultdict in python 2.5
from collections import defaultdict as DefaultDict
except ImportError:
class DefaultDict(dict):
def __init__(self, default_factory):
super(DefaultDict, self).__init__()
self.default_factory = default_factory
def __getitem__(self, key):
try:
return super(DefaultDict, self).__getitem__(key)
except KeyError:
value = self.default_factory()
self[key] = value
return value
class WafError(Exception):
def __init__(self, *args):
self.args = args
try:
self.stack = traceback.extract_stack()
except:
pass
Exception.__init__(self, *args)
def __str__(self):
return str(len(self.args) == 1 and self.args[0] or self.args)
class WscriptError(WafError):
def __init__(self, message, wscript_file=None):
if wscript_file:
self.wscript_file = wscript_file
self.wscript_line = None
else:
try:
(self.wscript_file, self.wscript_line) = self.locate_error()
except:
(self.wscript_file, self.wscript_line) = (None, None)
msg_file_line = ''
if self.wscript_file:
msg_file_line = "%s:" % self.wscript_file
if self.wscript_line:
msg_file_line += "%s:" % self.wscript_line
err_message = "%s error: %s" % (msg_file_line, message)
WafError.__init__(self, err_message)
def locate_error(self):
stack = traceback.extract_stack()
stack.reverse()
for frame in stack:
file_name = os.path.basename(frame[0])
is_wscript = (file_name == WSCRIPT_FILE or file_name == WSCRIPT_BUILD_FILE)
if is_wscript:
return (frame[0], frame[1])
return (None, None)
indicator = is_win32 and '\x1b[A\x1b[K%s%s%s\r' or '\x1b[K%s%s%s\r'
try:
from fnv import new as md5
import Constants
Constants.SIG_NIL = 'signofnv'
def h_file(filename):
m = md5()
try:
m.hfile(filename)
x = m.digest()
if x is None: raise OSError("not a file")
return x
except SystemError:
raise OSError("not a file" + filename)
except ImportError:
try:
try:
from hashlib import md5
except ImportError:
from md5 import md5
		def h_file(filename):
			f = open(filename, 'rb')
			m = md5()
			buf = f.read(100000)
			while buf:
				m.update(buf)
				buf = f.read(100000)
			f.close()
			return m.digest()
except ImportError:
# portability fixes may be added elsewhere (although, md5 should be everywhere by now)
md5 = None
class ordered_dict(UserDict):
def __init__(self, dict = None):
self.allkeys = []
UserDict.__init__(self, dict)
def __delitem__(self, key):
self.allkeys.remove(key)
UserDict.__delitem__(self, key)
def __setitem__(self, key, item):
if key not in self.allkeys: self.allkeys.append(key)
UserDict.__setitem__(self, key, item)
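# Minimal usage sketch (illustrative, not part of the original module):
# insertion order is recorded in 'allkeys', which plain dicts did not
# guarantee when this code was written.
def _ordered_dict_example():
    d = ordered_dict()
    d['z'] = 1
    d['a'] = 2
    d['z'] = 3  # re-assignment keeps the original position
    assert d.allkeys == ['z', 'a']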
def exec_command(s, **kw):
if 'log' in kw:
kw['stdout'] = kw['stderr'] = kw['log']
del(kw['log'])
kw['shell'] = isinstance(s, str)
try:
proc = pproc.Popen(s, **kw)
return proc.wait()
except OSError:
return -1
if is_win32:
def exec_command(s, **kw):
if 'log' in kw:
kw['stdout'] = kw['stderr'] = kw['log']
del(kw['log'])
kw['shell'] = isinstance(s, str)
if len(s) > 2000:
startupinfo = pproc.STARTUPINFO()
startupinfo.dwFlags |= pproc.STARTF_USESHOWWINDOW
kw['startupinfo'] = startupinfo
try:
if 'stdout' not in kw:
kw['stdout'] = pproc.PIPE
kw['stderr'] = pproc.PIPE
proc = pproc.Popen(s,**kw)
(stdout, stderr) = proc.communicate()
Logs.info(stdout)
if stderr:
Logs.error(stderr)
return proc.returncode
else:
proc = pproc.Popen(s,**kw)
return proc.wait()
except OSError:
return -1
listdir = os.listdir
if is_win32:
def listdir_win32(s):
if re.match('^[A-Za-z]:$', s):
# os.path.isdir fails if s contains only the drive name... (x:)
s += os.sep
if not os.path.isdir(s):
e = OSError()
e.errno = errno.ENOENT
raise e
return os.listdir(s)
listdir = listdir_win32
def waf_version(mini = 0x010000, maxi = 0x100000):
"Halts if the waf version is wrong"
ver = HEXVERSION
try: min_val = mini + 0
except TypeError: min_val = int(mini.replace('.', '0'), 16)
if min_val > ver:
Logs.error("waf version should be at least %s (%s found)" % (mini, ver))
sys.exit(0)
try: max_val = maxi + 0
except TypeError: max_val = int(maxi.replace('.', '0'), 16)
if max_val < ver:
Logs.error("waf version should be at most %s (%s found)" % (maxi, ver))
sys.exit(0)
def python_24_guard():
if sys.hexversion < 0x20400f0 or sys.hexversion >= 0x3000000:
raise ImportError("Waf requires Python >= 2.3 but the raw source requires Python 2.4, 2.5 or 2.6")
def ex_stack():
exc_type, exc_value, tb = sys.exc_info()
if Logs.verbose > 1:
exc_lines = traceback.format_exception(exc_type, exc_value, tb)
return ''.join(exc_lines)
return str(exc_value)<|fim▁hole|> return sth.split()
else:
return sth
g_loaded_modules = {}
"index modules by absolute path"
g_module=None
"the main module is special"
def load_module(file_path, name=WSCRIPT_FILE):
"this function requires an absolute path"
try:
return g_loaded_modules[file_path]
except KeyError:
pass
module = imp.new_module(name)
try:
code = readf(file_path, m='rU')
except (IOError, OSError):
raise WscriptError('Could not read the file %r' % file_path)
module.waf_hash_val = code
sys.path.insert(0, os.path.dirname(file_path))
try:
exec(compile(code, file_path, 'exec'), module.__dict__)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
raise WscriptError("".join(traceback.format_exception(exc_type, exc_value, tb)), file_path)
sys.path.pop(0)
g_loaded_modules[file_path] = module
return module
def set_main_module(file_path):
"Load custom options, if defined"
global g_module
g_module = load_module(file_path, 'wscript_main')
g_module.root_path = file_path
try:
g_module.APPNAME
except:
g_module.APPNAME = 'noname'
try:
g_module.VERSION
except:
g_module.VERSION = '1.0'
# note: to register the module globally, use the following:
# sys.modules['wscript_main'] = g_module
def to_hashtable(s):
"used for importing env files"
tbl = {}
lst = s.split('\n')
for line in lst:
if not line: continue
mems = line.split('=')
tbl[mems[0]] = mems[1]
return tbl
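# Hedged example (assumed input format: one KEY=value pair per line, as in
# waf environment cache files; blank lines are skipped, and a value that
# itself contains '=' would be truncated by the simple split above).
def _to_hashtable_example():
    assert to_hashtable("CC=gcc\nPREFIX=/usr/local\n") == {
        'CC': 'gcc',
        'PREFIX': '/usr/local',
    }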
def get_term_cols():
"console width"
return 80
try:
import struct, fcntl, termios
except ImportError:
pass
else:
if Logs.got_tty:
def myfun():
dummy_lines, cols = struct.unpack("HHHH", \
fcntl.ioctl(sys.stderr.fileno(),termios.TIOCGWINSZ , \
struct.pack("HHHH", 0, 0, 0, 0)))[:2]
return cols
# we actually try the function once to see if it is suitable
try:
myfun()
except:
pass
else:
get_term_cols = myfun
rot_idx = 0
rot_chr = ['\\', '|', '/', '-']
"the rotation character in the progress bar"
def split_path(path):
return path.split('/')
def split_path_cygwin(path):
if path.startswith('//'):
ret = path.split('/')[2:]
ret[0] = '/' + ret[0]
return ret
return path.split('/')
re_sp = re.compile('[/\\\\]')
def split_path_win32(path):
if path.startswith('\\\\'):
ret = re.split(re_sp, path)[2:]
ret[0] = '\\' + ret[0]
return ret
return re.split(re_sp, path)
if sys.platform == 'cygwin':
split_path = split_path_cygwin
elif is_win32:
split_path = split_path_win32
def copy_attrs(orig, dest, names, only_if_set=False):
for a in to_list(names):
u = getattr(orig, a, ())
if u or not only_if_set:
setattr(dest, a, u)
def def_attrs(cls, **kw):
'''
set attributes for class.
@param cls [any class]: the class to update the given attributes in.
@param kw [dictionary]: dictionary of attributes names and values.
if the given class hasn't one (or more) of these attributes, add the attribute with its value to the class.
'''
for k, v in kw.iteritems():
if not hasattr(cls, k):
setattr(cls, k, v)
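# Sketch of the intended use (the class below is illustrative): fill in
# defaults without overwriting attributes that are already set.
def _def_attrs_example():
    class _Task(object):
        color = 'GREEN'
    def_attrs(_Task, color='BLUE', ext_in='.c')
    assert _Task.color == 'GREEN'  # pre-existing attribute is kept
    assert _Task.ext_in == '.c'    # missing attribute gets the default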
def quote_define_name(path):
fu = re.compile("[^a-zA-Z0-9]").sub("_", path)
fu = fu.upper()
return fu
def quote_whitespace(path):
return (path.strip().find(' ') > 0 and '"%s"' % path or path).replace('""', '"')
def trimquotes(s):
if not s: return ''
s = s.rstrip()
if s[0] == "'" and s[-1] == "'": return s[1:-1]
return s
def h_list(lst):
m = md5()
m.update(str(lst))
return m.digest()
def h_fun(fun):
try:
return fun.code
except AttributeError:
try:
h = inspect.getsource(fun)
except IOError:
h = "nocode"
try:
fun.code = h
except AttributeError:
pass
return h
def pprint(col, str, label='', sep=os.linesep):
"print messages in color"
sys.stderr.write("%s%s%s %s%s" % (Logs.colors(col), str, Logs.colors.NORMAL, label, sep))
def check_dir(dir):
"""If a folder doesn't exists, create it."""
try:
os.stat(dir)
except OSError:
try:
os.makedirs(dir)
except OSError, e:
raise WafError("Cannot create folder '%s' (original error: %s)" % (dir, e))
def cmd_output(cmd, **kw):
silent = False
if 'silent' in kw:
silent = kw['silent']
del(kw['silent'])
if 'e' in kw:
tmp = kw['e']
del(kw['e'])
kw['env'] = tmp
kw['shell'] = isinstance(cmd, str)
kw['stdout'] = pproc.PIPE
if silent:
kw['stderr'] = pproc.PIPE
try:
p = pproc.Popen(cmd, **kw)
output = p.communicate()[0]
except OSError, e:
raise ValueError(str(e))
if p.returncode:
if not silent:
msg = "command execution failed: %s -> %r" % (cmd, str(output))
raise ValueError(msg)
output = ''
return output
reg_subst = re.compile(r"(\\\\)|(\$\$)|\$\{([^}]+)\}")
def subst_vars(expr, params):
"substitute ${PREFIX}/bin in /usr/local/bin"
def repl_var(m):
if m.group(1):
return '\\'
if m.group(2):
return '$'
try:
# environments may contain lists
return params.get_flat(m.group(3))
except AttributeError:
return params[m.group(3)]
return reg_subst.sub(repl_var, expr)
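# Worked example (illustrative values): '${NAME}' is looked up in params and
# '$$' collapses to a literal '$'; a real waf Environment would be queried
# through get_flat() instead of plain indexing.
def _subst_vars_example():
    params = {'PREFIX': '/usr/local'}
    assert subst_vars('${PREFIX}/bin', params) == '/usr/local/bin'
    assert subst_vars('$$HOME', params) == '$HOME'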
def unversioned_sys_platform_to_binary_format(unversioned_sys_platform):
"infers the binary format from the unversioned_sys_platform name."
if unversioned_sys_platform in ('linux', 'freebsd', 'netbsd', 'openbsd', 'sunos'):
return 'elf'
elif unversioned_sys_platform == 'darwin':
return 'mac-o'
elif unversioned_sys_platform in ('win32', 'cygwin', 'uwin', 'msys'):
return 'pe'
# TODO we assume all other operating systems are elf, which is not true.
# we may set this to 'unknown' and have ccroot and other tools handle the case "gracefully" (whatever that means).
return 'elf'
def unversioned_sys_platform():
"""returns an unversioned name from sys.platform.
sys.platform is not very well defined and depends directly on the python source tree.
The version appended to the names is unreliable as it's taken from the build environment at the time python was built,
i.e., it's possible to get freebsd7 on a freebsd8 system.
So we remove the version from the name, except for special cases where the os has a stupid name like os2 or win32.
Some possible values of sys.platform are, amongst others:
aix3 aix4 atheos beos5 darwin freebsd2 freebsd3 freebsd4 freebsd5 freebsd6 freebsd7
generic irix5 irix6 linux2 mac netbsd1 next3 os2emx riscos sunos5 unixware7
Investigating the python source tree may reveal more values.
"""
s = sys.platform
if s == 'java':
# The real OS is hidden under the JVM.
from java.lang import System
s = System.getProperty('os.name')
# see http://lopica.sourceforge.net/os.html for a list of possible values
if s == 'Mac OS X':
return 'darwin'
elif s.startswith('Windows '):
return 'win32'
elif s == 'OS/2':
return 'os2'
elif s == 'HP-UX':
return 'hpux'
elif s in ('SunOS', 'Solaris'):
return 'sunos'
else: s = s.lower()
if s == 'win32' or s.endswith('os2') and s != 'sunos2': return s
return re.split('\d+$', s)[0]
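# Hedged illustrations of the version-stripping rule above; 'win32' never
# reaches this point because of the early return:
def _unversioned_sys_platform_examples():
    assert re.split('\d+$', 'linux2')[0] == 'linux'
    assert re.split('\d+$', 'freebsd7')[0] == 'freebsd'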
#@deprecated('use unversioned_sys_platform instead')
def detect_platform():
"""this function has been in the Utils module for some time.
It's hard to guess what people have used it for.
It seems its goal is to return an unversioned sys.platform, but it's not handling all platforms.
For example, the version is not removed on freebsd and netbsd, amongst others.
"""
s = sys.platform
# known POSIX
for x in 'cygwin linux irix sunos hpux aix darwin'.split():
# sys.platform may be linux2
if s.find(x) >= 0:
return x
# unknown POSIX
if os.name in 'posix java os2'.split():
return os.name
return s
def load_tool(tool, tooldir=None):
'''
load_tool: import a Python module, optionally using several directories.
@param tool [string]: name of tool to import.
@param tooldir [list]: directories to look for the tool.
@return: the loaded module.
Warning: this function is not thread-safe: plays with sys.path,
so must run in sequence.
'''
if tooldir:
assert isinstance(tooldir, list)
sys.path = tooldir + sys.path
try:
try:
return __import__(tool)
except ImportError, e:
Logs.error('Could not load the tool %r in %r:\n%s' % (tool, sys.path, e))
raise
finally:
if tooldir:
sys.path = sys.path[len(tooldir):]
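# Usage sketch (the tool name and directory are assumptions for the example;
# any importable module works):
#
#   mod = load_tool('compiler_cc')               # found on sys.path
#   mod = load_tool('mytool', tooldir=['tools']) # found in a project folder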
def readf(fname, m='r'):
"get the contents of a file, it is not used anywhere for the moment"
f = open(fname, m)
try:
txt = f.read()
finally:
f.close()
return txt
def nada(*k, **kw):
"""A function that does nothing"""
pass
def diff_path(top, subdir):
"""difference between two absolute paths"""
top = os.path.normpath(top).replace('\\', '/').split('/')
subdir = os.path.normpath(subdir).replace('\\', '/').split('/')
if len(top) == len(subdir): return ''
diff = subdir[len(top) - len(subdir):]
return os.path.join(*diff)
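# Worked example (illustrative POSIX-style paths):
def _diff_path_example():
    assert diff_path('/a/b', '/a/b/c/d') == os.path.join('c', 'd')
    assert diff_path('/a/b', '/a/b') == ''  # equal paths have no difference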
class Context(object):
"""A base class for commands to be executed from Waf scripts"""
def set_curdir(self, dir):
self.curdir_ = dir
def get_curdir(self):
try:
return self.curdir_
except AttributeError:
self.curdir_ = os.getcwd()
return self.get_curdir()
curdir = property(get_curdir, set_curdir)
def recurse(self, dirs, name=''):
"""The function for calling scripts from folders, it tries to call wscript + function_name
and if that file does not exist, it will call the method 'function_name' from a file named wscript
the dirs can be a list of folders or a string containing space-separated folder paths
"""
if not name:
name = inspect.stack()[1][3]
if isinstance(dirs, str):
dirs = to_list(dirs)
for x in dirs:
if os.path.isabs(x):
nexdir = x
else:
nexdir = os.path.join(self.curdir, x)
base = os.path.join(nexdir, WSCRIPT_FILE)
file_path = base + '_' + name
try:
txt = readf(file_path, m='rU')
except (OSError, IOError):
try:
module = load_module(base)
except OSError:
raise WscriptError('No such script %s' % base)
try:
f = module.__dict__[name]
except KeyError:
raise WscriptError('No function %s defined in %s' % (name, base))
if getattr(self.__class__, 'pre_recurse', None):
self.pre_recurse(f, base, nexdir)
old = self.curdir
self.curdir = nexdir
try:
f(self)
finally:
self.curdir = old
if getattr(self.__class__, 'post_recurse', None):
self.post_recurse(module, base, nexdir)
else:
dc = {'ctx': self}
if getattr(self.__class__, 'pre_recurse', None):
dc = self.pre_recurse(txt, file_path, nexdir)
old = self.curdir
self.curdir = nexdir
try:
try:
exec(compile(txt, file_path, 'exec'), dc)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
raise WscriptError("".join(traceback.format_exception(exc_type, exc_value, tb)), base)
finally:
self.curdir = old
if getattr(self.__class__, 'post_recurse', None):
self.post_recurse(txt, file_path, nexdir)
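# Recursion sketch (illustrative; the folder names and the 'build' function
# are assumptions about a typical project layout):
#
#   ctx = Context()
#   ctx.curdir = '/path/to/project'
#   ctx.recurse('src tests', name='build')  # runs build(ctx) from each wscript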
if is_win32:
old = shutil.copy2
def copy2(src, dst):
old(src, dst)
		shutil.copystat(src, dst)
setattr(shutil, 'copy2', copy2)
def zip_folder(dir, zip_file_name, prefix):
"""
	prefix is the top-level folder name under which all entries are stored in the archive
"""
import zipfile
zip = zipfile.ZipFile(zip_file_name, 'w', compression=zipfile.ZIP_DEFLATED)
base = os.path.abspath(dir)
if prefix:
if prefix[-1] != os.sep:
prefix += os.sep
n = len(base)
for root, dirs, files in os.walk(base):
for f in files:
archive_name = prefix + root[n:] + os.sep + f
zip.write(root + os.sep + f, archive_name, zipfile.ZIP_DEFLATED)
zip.close()
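# Usage sketch (illustrative paths): store everything under build/dist in the
# archive, below a single 'myapp-1.0/' top-level folder:
#
#   zip_folder('build/dist', 'myapp-1.0.zip', 'myapp-1.0')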
def get_elapsed_time(start):
"Format a time delta (datetime.timedelta) using the format DdHhMmS.MSs"
delta = datetime.datetime.now() - start
# cast to int necessary for python 3.0
days = int(delta.days)
hours = int(delta.seconds / 3600)
minutes = int((delta.seconds - hours * 3600) / 60)
seconds = delta.seconds - hours * 3600 - minutes * 60 \
+ float(delta.microseconds) / 1000 / 1000
result = ''
if days:
result += '%dd' % days
if days or hours:
result += '%dh' % hours
if days or hours or minutes:
result += '%dm' % minutes
return '%s%.3fs' % (result, seconds)
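# Minimal check of the format described above (illustrative; the exact
# milliseconds depend on how long the call itself takes):
def _get_elapsed_time_example():
    start = datetime.datetime.now() - datetime.timedelta(minutes=2, seconds=3)
    assert get_elapsed_time(start).startswith('2m3')  # e.g. '2m3.000s'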
if os.name == 'java':
# For Jython (they should really fix the inconsistency)
try:
gc.disable()
gc.enable()
except NotImplementedError:
gc.disable = gc.enable<|fim▁end|> |
def to_list(sth):
if isinstance(sth, str): |
<|file_name|>testing.py<|end_file_name|><|fim▁begin|>"""
.15925 Editor
Copyright 2014 TechInvestLab.ru [email protected]
.15925 Editor is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 3.0 of the License, or (at your option) any later version.
.15925 Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with .15925 Editor.
"""<|fim▁hole|>
from iso15926.tools.environment import EnvironmentContext
from PySide.QtCore import *
from PySide.QtGui import *
import os
from framework.dialogs import Choice
class TestWindow(QDialog):
vis_label = tm.main.tests_title
tests_dir = 'tests'
def __init__(self):
QDialog.__init__(self, appdata.topwindow, Qt.WindowSystemMenuHint | Qt.WindowTitleHint)
self.setWindowTitle(self.vis_label)
layout = QVBoxLayout(self)
box = QGroupBox(tm.main.tests_field, self)
self.tests_list = QListWidget(box)
boxlayout = QHBoxLayout(box)
boxlayout.addWidget(self.tests_list)
layout.addWidget(box)
for n in os.listdir(self.tests_dir):
if n.startswith(".") or not n.endswith('.py'):
continue
sp = os.path.splitext(n)
item = QListWidgetItem(sp[0], self.tests_list)
item.setCheckState(Qt.Unchecked)
self.btn_prepare = QPushButton(tm.main.prepare, self)
self.btn_prepare.setToolTip(tm.main.prepare_selected_tests)
self.btn_prepare.clicked.connect(self.OnPrepare)
self.btn_run = QPushButton(tm.main.run, self)
self.btn_run.setToolTip(tm.main.run_selected_tests)
self.btn_run.clicked.connect(self.OnRun)
self.btn_sel_all = QPushButton(tm.main.select_all, self)
self.btn_sel_all.clicked.connect(self.SelectAll)
self.btn_unsel_all = QPushButton(tm.main.unselect_all, self)
self.btn_unsel_all.clicked.connect(self.UnselectAll)
self.btn_cancel = QPushButton(tm.main.cancel, self)
self.btn_cancel.clicked.connect(self.reject)
btnlayout = QHBoxLayout()
btnlayout.addWidget(self.btn_sel_all)
btnlayout.addWidget(self.btn_unsel_all)
btnlayout.addStretch()
btnlayout.addWidget(self.btn_prepare)
btnlayout.addWidget(self.btn_run)
btnlayout.addWidget(self.btn_cancel)
layout.addLayout(btnlayout)
box = QGroupBox(tm.main.tests_result_field, self)
self.report = QPlainTextEdit(self)
boxlayout = QHBoxLayout(box)
boxlayout.addWidget(self.report)
layout.addWidget(box)
self.exec_()
    def SelectAll(self):
        for i in xrange(self.tests_list.count()):
            self.tests_list.item(i).setCheckState(Qt.Checked)
    def UnselectAll(self):
        for i in xrange(self.tests_list.count()):
            self.tests_list.item(i).setCheckState(Qt.Unchecked)
    def OnPrepare(self):
        if Choice(tm.main.tests_prepare_warning):
            for i in xrange(self.tests_list.count()):
                item = self.tests_list.item(i)
                if item.checkState() != Qt.Checked:
                    continue
                name = item.text()
                self.report.appendPlainText(tm.main.tests_preparing.format(name))
                locals = {'mode': 'prepare'}
                ec = EnvironmentContext(None, locals)
                ec.ExecutePythonFile(os.path.join(self.tests_dir, name + '.py'))
            self.report.appendPlainText(tm.main.tests_preparing_done)
def OnRun(self):
self.report.appendPlainText(tm.main.tests_running)
count = 0
passed = 0
for i in xrange(self.tests_list.count()):
item = self.tests_list.item(i)
name = item.text()
if not item.checkState() == Qt.Checked:
continue
count += 1
locals = {'mode': 'run', 'passed': False}
ec = EnvironmentContext(None, locals)
ec.ExecutePythonFile(os.path.join(self.tests_dir, name + '.py'))
if locals['passed']:
passed += 1
self.report.appendPlainText(tm.main.test_passed.format(name))
else:
self.report.appendPlainText(tm.main.test_failed.format(name))
self.report.appendPlainText(tm.main.tests_result)
self.report.appendPlainText(tm.main.tests_result_info.format(passed, count))
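    # Sketch of a test script as OnPrepare/OnRun drive it (assumed protocol:
    # the script runs via EnvironmentContext with 'mode' and 'passed' in its
    # namespace; the helper names below are hypothetical):
    #
    #   if mode == 'prepare':
    #       create_fixtures()
    #   elif mode == 'run':
    #       passed = check_model()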
if os.path.exists(TestWindow.tests_dir):
@public('workbench.menu.help')
class xTestMenu:
vis_label = tm.main.menu_tests
@classmethod
def Do(cls):
TestWindow()<|fim▁end|> |