prompt: large_string (lengths 70 – 991k)
completion: large_string (lengths 0 – 1.02k)
<|file_name|>ar-DZ.ts<|end_file_name|><|fim▁begin|>/** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ // THIS CODE IS GENERATED - DO NOT MODIFY // See angular/tools/gulp-tasks/cldr/extract.js const u = undefined; function plural(n: number): number { if (n === 0) return 0; if (n === 1) return 1; if (n === 2) return 2; if (n % 100 === Math.floor(n % 100) && n % 100 >= 3 && n % 100 <= 10) return 3; if (n % 100 === Math.floor(n % 100) && n % 100 >= 11 && n % 100 <= 99) return 4; return 5; } export default [ 'ar-DZ', [['ص', 'م'], u, u], [['ص', 'م'], u, ['صباحًا', 'مساءً']],<|fim▁hole|> [ ['ح', 'ن', 'ث', 'ر', 'خ', 'ج', 'س'], [ 'الأحد', 'الاثنين', 'الثلاثاء', 'الأربعاء', 'الخميس', 'الجمعة', 'السبت' ], u, ['أحد', 'إثنين', 'ثلاثاء', 'أربعاء', 'خميس', 'جمعة', 'سبت'] ], u, [ ['ج', 'ف', 'م', 'أ', 'م', 'ج', 'ج', 'أ', 'س', 'أ', 'ن', 'د'], [ 'جانفي', 'فيفري', 'مارس', 'أفريل', 'ماي', 'جوان', 'جويلية', 'أوت', 'سبتمبر', 'أكتوبر', 'نوفمبر', 'ديسمبر' ], u ], u, [['ق.م', 'م'], u, ['قبل الميلاد', 'ميلادي']], 6, [5, 6], ['d\u200f/M\u200f/y', 'dd\u200f/MM\u200f/y', 'd MMMM y', 'EEEE، d MMMM y'], ['h:mm a', 'h:mm:ss a', 'h:mm:ss a z', 'h:mm:ss a zzzz'], ['{1} {0}', u, u, u], [ ',', '.', ';', '\u200e%\u200e', '\u200e+', '\u200e-', 'E', '×', '‰', '∞', 'ليس رقمًا', ':' ], ['#,##0.###', '#,##0%', '¤ #,##0.00', '#E0'], 'DZD', 'د.ج.\u200f', 'دينار جزائري', { 'AED': ['د.إ.\u200f'], 'ARS': [u, 'AR$'], 'AUD': ['AU$'], 'BBD': [u, 'BB$'], 'BHD': ['د.ب.\u200f'], 'BMD': [u, 'BM$'], 'BND': [u, 'BN$'], 'BSD': [u, 'BS$'], 'BZD': [u, 'BZ$'], 'CAD': ['CA$'], 'CLP': [u, 'CL$'], 'CNY': ['CN¥'], 'COP': [u, 'CO$'], 'CUP': [u, 'CU$'], 'DOP': [u, 'DO$'], 'DZD': ['د.ج.\u200f'], 'EGP': ['ج.م.\u200f', 'E£'], 'FJD': [u, 'FJ$'], 'GBP': ['UK£'], 'GYD': [u, 'GY$'], 'HKD': ['HK$'], 'IQD': ['د.ع.\u200f'], 'IRR': ['ر.إ.'], 'JMD': [u, 'JM$'], 'JOD': ['د.أ.\u200f'], 'JPY': ['JP¥'], 'KWD': ['د.ك.\u200f'], 'KYD': [u, 'KY$'], 'LBP': ['ل.ل.\u200f', 'L£'], 'LRD': [u, '$LR'], 'LYD': ['د.ل.\u200f'], 'MAD': ['د.م.\u200f'], 'MRU': ['أ.م.'], 'MXN': ['MX$'], 'NZD': ['NZ$'], 'OMR': ['ر.ع.\u200f'], 'QAR': ['ر.ق.\u200f'], 'SAR': ['ر.س.\u200f'], 'SBD': [u, 'SB$'], 'SDD': ['د.س.\u200f'], 'SDG': ['ج.س.'], 'SRD': [u, 'SR$'], 'SYP': ['ل.س.\u200f', '£'], 'THB': ['฿'], 'TND': ['د.ت.\u200f'], 'TTD': [u, 'TT$'], 'TWD': ['NT$'], 'USD': ['US$'], 'UYU': [u, 'UY$'], 'XXX': ['***'], 'YER': ['ر.ي.\u200f'] }, 'rtl', plural ];<|fim▁end|>
<|file_name|>config.js<|end_file_name|><|fim▁begin|><|fim▁hole|> * openircd, a lightweight ircd written in javascript v8 with nodejs. * http://www.openbrasil.org/ - rede do conhecimento livre. * * $Id$ */ exports.listen = { port: 6667, host: '0.0.0.0' }; exports.server = { name: "experimental.openbrasil.org", description: "servidor experimental openbrasil", }; exports.network = { name: "openbrasil" }; exports.general = { ping_timeout: 1 };<|fim▁end|>
/**
<|file_name|>test_heartrate_models.py<|end_file_name|><|fim▁begin|>import pandas as pd import pytest from athletic_pandas.algorithms import heartrate_models def test_heartrate_model(): heartrate = pd.Series(range(50)) power = pd.Series(range(0, 100, 2)) model, predictions = heartrate_models.heartrate_model(heartrate, power) assert model.params['hr_rest'].value == 0.00039182374117378518 assert model.params['hr_max'].value == 195.75616175654685<|fim▁hole|> assert model.params['hr_drift'].value == 6.7232899323328612 * 10**-5 assert len(predictions) == 50<|fim▁end|>
assert model.params['dhr'].value == 0.49914432620946803 assert model.params['tau_rise'].value == 0.98614419733274383 assert model.params['tau_fall'].value == 22.975975612579408
<|file_name|>gitignore.js<|end_file_name|><|fim▁begin|>(function(exports) {<|fim▁hole|> var content = grunt.file.read('build/.gitignore').replace(/composer.lock[\r\n]/m, ''); grunt.file.write('build/.gitignore', content); grunt.verbose.ok(); done(); }; })(typeof exports === 'object' && exports || this);<|fim▁end|>
"use strict"; exports.dontIgnoreComposerLockFile = function(grunt, init, done) { grunt.verbose.write("Removing composer.lock from .gitignore.");
<|file_name|>user.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from . import app, db<|fim▁hole|>from hashlib import md5 from Lotus.lib.msg_code import Msg import json @app.route('/user/login', methods=['POST']) def user_login(): email = request.form.get('email', None) psw = request.form.get('psw', None) if email is not None and psw is not None: users = User.query.filter_by(email=email, psw=psw) if users: g.user = users[0] session['userid'] = users[0].userid else: return '{"code":%d,"msg":$s}'.format(Msg['faild'], 'user not exist') else: return '{"code":%d,"msg":$s}'.format(Msg['faild'], 'params not enougth') @app.route('/user/register', methods=['POST']) def user_register(): # todo (参数不够)有插入异常怎么办? # todo 忘记密码.. try: u = User() u.username = request.form.get('username', None) u.description = request.form.get('description', None) u.type = request.form.get('type', User.CONST_TYPE_USER) u.email = request.form.get('email', None) m = md5() m.update(request.form.get('psw', User.CONST_DEFAULT_PASSWORD)) # 默认密码 u.psw = m.hexdigest() db.session.add(u) db.session.commit() except Exception as e: return '{"code":%d,"msg":$s}'.format(Msg['faild'], 'register faild') return '{"code":%d,"msg":$s}'.format(Msg['success'], 'register success') @app.route('/user/<int:userid>/avatar', methods=['GET', 'POST']) def user_avatar(userid): #upload #TODO support upload avater if request.method == 'POST': pass else: pass @app.route('/user/<int:userid>/profile', methods=['GET']) def user_profile(userid): if session.get('userid'): result = { 'userid': g.user.userid, 'username': g.user.username, 'avatar': g.user.avatar, 'description': g.user.description, 'type': g.user.type, 'email': g.user.email } return json.dumps(result) else: redirect('/user/login') @app.route('/user/<int:userid>/issue/sends/page/<int:page>', methods=['GET']) def user_issues_send(userid, page): pass @app.route('/user/<int:userid>/issue/favours/page/<int:page>', methods=['GET']) def user_issues_favour(userid, page): pass @app.route('/user/<int:userid>/issue/favours/page/<int:page>', methods=['GET']) def user_messages(userid, page): pass<|fim▁end|>
from flask import request, g, session, redirect from Lotus.model.user import User
<|file_name|>GroupXmlExporterTest.java<|end_file_name|><|fim▁begin|>/* * Copyright 2006-2011 The Kuali Foundation * * Licensed under the Educational Community License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.opensource.org/licenses/ecl2.php * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kuali.rice.kew.xml.export; import org.kuali.rice.core.api.CoreApiServiceLocator; import org.kuali.rice.kew.export.KewExportDataSet; import org.kuali.rice.kim.api.group.Group; import org.kuali.rice.kim.api.group.GroupService; import org.kuali.rice.kim.api.identity.IdentityService; import org.kuali.rice.kim.api.services.KimApiServiceLocator; import org.kuali.rice.test.BaselineTestCase; import java.util.List; import static org.junit.Assert.assertTrue; /** * This is a description of what this class does - jjhanso don't forget to fill this in. * * @author Kuali Rice Team ([email protected]) * */ @BaselineTestCase.BaselineMode(BaselineTestCase.Mode.NONE) public class GroupXmlExporterTest extends XmlExporterTestCase { /** * This overridden method ... * * @see org.kuali.rice.kew.xml.export.XmlExporterTestCase#assertExport() */ @Override protected void assertExport() throws Exception { IdentityService identityService = KimApiServiceLocator.getIdentityService(); GroupService groupService = KimApiServiceLocator.getGroupService(); List<? extends Group> oldGroups = groupService.getGroupsByPrincipalId( identityService.getPrincipalByPrincipalName("ewestfal").getPrincipalId()); KewExportDataSet dataSet = new KewExportDataSet(); dataSet.getGroups().addAll(oldGroups); byte[] xmlBytes = CoreApiServiceLocator.getXmlExporterService().export(dataSet.createExportDataSet()); assertTrue("XML should be non empty.", xmlBytes != null && xmlBytes.length > 0); StringBuffer output = new StringBuffer(); for (int i=0; i < xmlBytes.length; i++){ output.append((char)xmlBytes[i]); } <|fim▁hole|> //ClearDatabaseLifecycle clearLifeCycle = new ClearDatabaseLifecycle(); //clearLifeCycle.getTablesToClear().add("EN_RULE_BASE_VAL_T"); //clearLifeCycle.getTablesToClear().add("EN_RULE_ATTRIB_T"); //clearLifeCycle.getTablesToClear().add("EN_RULE_TMPL_T"); //clearLifeCycle.getTablesToClear().add("EN_DOC_TYP_T"); //clearLifeCycle.start(); //new ClearCacheLifecycle().stop(); //KimImplServiceLocator.getGroupService(). // import the exported xml //loadXmlStream(new BufferedInputStream(new ByteArrayInputStream(xmlBytes))); /* List newRules = KEWServiceLocator.getRuleService().fetchAllRules(true); assertEquals("Should have same number of old and new Rules.", oldRules.size(), newRules.size()); for (Iterator iterator = oldRules.iterator(); iterator.hasNext();) { RuleBaseValues oldRule = (RuleBaseValues) iterator.next(); boolean foundRule = false; for (Iterator iterator2 = newRules.iterator(); iterator2.hasNext();) { RuleBaseValues newRule = (RuleBaseValues) iterator2.next(); if (oldRule.getDescription().equals(newRule.getDescription())) { assertRuleExport(oldRule, newRule); foundRule = true; } } assertTrue("Could not locate the new rule for description " + oldRule.getDescription(), foundRule); } */ } }<|fim▁end|>
System.out.print(output.toString()); // now clear the tables
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python from setuptools import setup try: import unittest2 # noqa except ImportError: test_loader = 'unittest:TestLoader' else: test_loader = 'unittest2:TestLoader' setup( name='mockldap', version='0.1.8', description=u"A simple mock implementation of python-ldap.", long_description=open('README').read(), url='http://bitbucket.org/psagers/mockldap/', author='Peter Sagerson', author_email='[email protected]', license='BSD', packages=['mockldap'], classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Web Environment', 'Programming Language :: Python', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: BSD License',<|fim▁hole|> ], keywords=['mock', 'ldap'], install_requires=[ 'python-ldap', 'funcparserlib==0.3.6', 'mock', ], extras_require={ 'passlib': ['passlib>=1.6.1'], }, setup_requires=[ 'setuptools>=0.6c11', ], test_loader=test_loader, test_suite='mockldap.tests', )<|fim▁end|>
'Topic :: Internet :: WWW/HTTP', 'Topic :: System :: Systems Administration :: Authentication/Directory :: LDAP', 'Topic :: Software Development :: Libraries :: Python Modules',
<|file_name|>buster.js<|end_file_name|><|fim▁begin|>var config = this.window ? {} : module.exports; config.Rjs = { environment: "browser", rootPath: "../", sources: [ "build/Reality.combined.replaced.js", ], tests: [<|fim▁hole|><|fim▁end|>
"test/**/*.js" ] };
<|file_name|>asha-workers-details.service_20170223121119.ts<|end_file_name|><|fim▁begin|>import { Injectable } from '@angular/core'; import { Headers, Http } from '@angular/http'; import 'rxjs/add/operator/toPromise'; import { AshaWorkerBasicDetailsModel } from '../model/asha-worker-basic-details-model'; import { AshaWorkersList, DummyAshaWorkerDetails } from './temporary/temp-data'; import { AshaWorkerPaymentRulesModel } from '../model/asha-worker-payment-rules-model'; @Injectable() export class AshaWorkersDetailsService{ private url: string = "http://www.ashavizianagaram.in:81/ashaservices/webapi/getdetails/get_asha_worker_details"; constructor(private http: Http){} getAshaWorkersList(): Promise<AshaWorkerBasicDetailsModel[]>{ //return Promise.resolve(AshaWorkersList);<|fim▁hole|> .catch(this.handleError); } getDummyAshaWorkerDetails(): Promise<AshaWorkerBasicDetailsModel>{ return Promise.resolve(DummyAshaWorkerDetails); } updateAshaWorkerPaymentRules(): Promise<AshaWorkerPaymentRulesModel>{ let body = JSON.stringify(AshaWorkerPaymentRulesModel); return this.http.put(this.url,body) .toPromise() .then(response => response.json().success as AshaWorkerPaymentRulesModel) .catch(this.handleError) } private handleError(error: any):Promise<any>{ console.error('An error occured', error); return Promise.reject(error.message || error); } }<|fim▁end|>
return this.http.get(this.url) .toPromise() .then(response => response.json().awAshaDetailsModel as AshaWorkerBasicDetailsModel[])
<|file_name|>JSONUtil.java<|end_file_name|><|fim▁begin|>/* * Copyright 2016 Crown Copyright * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package stroom.util.client; import com.google.gwt.json.client.JSONArray; import com.google.gwt.json.client.JSONNumber; import com.google.gwt.json.client.JSONObject; import com.google.gwt.json.client.JSONParser; import com.google.gwt.json.client.JSONString; import com.google.gwt.json.client.JSONValue; public class JSONUtil { private JSONUtil() { // Utility class. } public static JSONValue parse(final String json) { if (json != null && !json.isEmpty()) { return JSONParser.parseStrict(json); } return null; } public static JSONObject getObject(final JSONValue v) { if (v != null) { return v.isObject(); } return null; } public static JSONArray getArray(final JSONValue v) { if (v != null) { return v.isArray(); } return null; } public static String getString(final JSONValue v) { if (v != null) { final JSONString jsonString = v.isString(); if (jsonString != null) { return jsonString.stringValue(); } } return null; } public static Integer getInteger(final JSONValue v) { if (v != null) { final JSONNumber jsonNumber = v.isNumber(); if (jsonNumber != null) { return Integer.valueOf((int) jsonNumber.doubleValue()); } } return null; } public static Double getDouble(final JSONValue v) { if (v != null) { final JSONNumber jsonNumber = v.isNumber(); if (jsonNumber != null) { return Double.valueOf(jsonNumber.doubleValue());<|fim▁hole|> return null; } public static String[] getStrings(final JSONValue v) { String[] strings = new String[0]; final JSONArray array = getArray(v); if (array != null) { strings = new String[array.size()]; for (int i = 0; i < array.size(); i++) { strings[i] = getString(array.get(i)); } } return strings; } }<|fim▁end|>
} }
<|file_name|>revidx.rs<|end_file_name|><|fim▁begin|>/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ use std::ops::{Add, Mul}; use std::str::FromStr; use std::u32; /// Index into a `RevLog` #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] pub struct RevIdx(u32); // Implement `RevIdx`s methods impl RevIdx { /// Return index for first entry pub fn zero() -> Self { RevIdx(0) } /// Return successor index pub fn succ(self) -> Self { RevIdx(self.0 + 1) } /// Return previous index /// /// Panics if index is zero. pub fn pred(self) -> Self { assert!(self.0 > 0); RevIdx(self.0 - 1) } /// Return iterator for a range from index to `lim`. pub fn range_to(&self, lim: Self) -> RevIdxRange { RevIdxRange(self.0, lim.0) } /// Return an open ended iterator from index. pub fn range(&self) -> RevIdxRange { RevIdxRange(self.0, u32::MAX) } pub fn as_u32(&self) -> u32 { self.0 } } // Construct a `RevIdx` from a `u32` impl From<u32> for RevIdx { fn from(v: u32) -> Self { RevIdx(v) } } // Construct a `RevIdx` from a `usize` // Panics if the usize is larger than u32::MAX impl From<usize> for RevIdx { fn from(v: usize) -> Self { assert!(v <= u32::MAX as usize); RevIdx(v as u32) } } // Construct a `RevIdx` from a string (which may fail) impl FromStr for RevIdx { type Err = <u32 as FromStr>::Err; fn from_str(s: &str) -> Result<Self, Self::Err> { u32::from_str(s).map(RevIdx) } } // Multiply operator for RevIdx * usize -> usize // Used for constructing a byte offset for an index impl Mul<usize> for RevIdx { type Output = usize; <|fim▁hole|> fn mul(self, other: usize) -> Self::Output { self.0 as usize * other } } // RevIdx + usize -> RevIdx impl Add<usize> for RevIdx { type Output = RevIdx; fn add(self, other: usize) -> Self::Output { RevIdx((self.0 as usize + other) as u32) } } // Convert a `RevIdx` into an open-ended iterator of RevIdx values // starting at RevIdx's value. ie, RevIdx(2).into_iter() => RevIdx(2), RevIdx(3), ... 
impl<'a> IntoIterator for &'a RevIdx { type Item = RevIdx; type IntoIter = RevIdxRange; fn into_iter(self) -> Self::IntoIter { self.range() } } /// An open-ended or bounded iterator over a range of RevIdx #[derive(Copy, Clone, Debug)] pub struct RevIdxRange(u32, u32); impl Iterator for RevIdxRange { type Item = RevIdx; fn next(&mut self) -> Option<Self::Item> { if self.0 < self.1 { let ret = RevIdx(self.0); self.0 += 1; Some(ret) } else { None } } } #[cfg(test)] mod test { use super::*; use std::str::FromStr; #[test] fn zero() { assert_eq!(RevIdx::zero(), RevIdx(0)) } #[test] fn succ() { assert_eq!(RevIdx::zero().succ(), RevIdx(1)); assert_eq!(RevIdx::zero().succ().succ(), RevIdx(2)); } #[test] fn pred() { assert_eq!(RevIdx(10).pred(), RevIdx(9)); } #[test] #[should_panic] fn bad_pred() { println!("bad {:?}", RevIdx::zero().pred()); } #[test] fn range_to() { let v: Vec<_> = RevIdx::zero().range_to(RevIdx(5)).collect(); assert_eq!( v, vec![RevIdx(0), RevIdx(1), RevIdx(2), RevIdx(3), RevIdx(4)] ); } #[test] fn iter() { let v: Vec<_> = RevIdx::zero().into_iter().take(5).collect(); assert_eq!( v, vec![RevIdx(0), RevIdx(1), RevIdx(2), RevIdx(3), RevIdx(4)] ) } #[test] fn fromstr() { let idx: RevIdx = FromStr::from_str("555").expect("Valid string"); assert_eq!(idx, RevIdx(555)); } #[test] fn fromstr_bad1() { match RevIdx::from_str("abc123") { Ok(x) => panic!("unexpected success with {:?}", x), Err(err) => println!("ok {:?}", err), } } #[test] fn fromstr_bad2() { match RevIdx::from_str("-1") { Ok(x) => panic!("unexpected success with {:?}", x), Err(err) => println!("ok {:?}", err), } } }<|fim▁end|>
<|file_name|>core.py<|end_file_name|><|fim▁begin|># Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Identity service.""" import abc import functools import os import uuid from oslo.config import cfg import six from keystone import clean from keystone.common import dependency from keystone.common import driver_hints from keystone.common import manager from keystone import config from keystone import exception from keystone.i18n import _ from keystone.identity.mapping_backends import mapping from keystone import notifications from keystone.openstack.common import importutils from keystone.openstack.common import log CONF = config.CONF LOG = log.getLogger(__name__) DOMAIN_CONF_FHEAD = 'keystone.' DOMAIN_CONF_FTAIL = '.conf' def filter_user(user_ref): """Filter out private items in a user dict. 'password', 'tenants' and 'groups' are never returned. :returns: user_ref """ if user_ref: user_ref = user_ref.copy() user_ref.pop('password', None) user_ref.pop('tenants', None) user_ref.pop('groups', None) user_ref.pop('domains', None) try: user_ref['extra'].pop('password', None) user_ref['extra'].pop('tenants', None)<|fim▁hole|> except KeyError: pass return user_ref class DomainConfigs(dict): """Discover, store and provide access to domain specific configs. The setup_domain_drivers() call will be made via the wrapper from the first call to any driver function handled by this manager. This setup call it will scan the domain config directory for files of the form keystone.<domain_name>.conf For each file, the domain_name will be turned into a domain_id and then this class will: - Create a new config structure, adding in the specific additional options defined in this config file - Initialise a new instance of the required driver with this new config. """ configured = False driver = None def _load_driver(self, assignment_api, domain_id): domain_config = self[domain_id] domain_config['driver'] = ( importutils.import_object( domain_config['cfg'].identity.driver, domain_config['cfg'])) domain_config['driver'].assignment_api = assignment_api def _load_config(self, assignment_api, file_list, domain_name): try: domain_ref = assignment_api.get_domain_by_name(domain_name) except exception.DomainNotFound: LOG.warning( _('Invalid domain name (%s) found in config file name'), domain_name) return # Create a new entry in the domain config dict, which contains # a new instance of both the conf environment and driver using # options defined in this set of config files. 
Later, when we # service calls via this Manager, we'll index via this domain # config dict to make sure we call the right driver domain = domain_ref['id'] self[domain] = {} self[domain]['cfg'] = cfg.ConfigOpts() config.configure(conf=self[domain]['cfg']) self[domain]['cfg'](args=[], project='keystone', default_config_files=file_list) self._load_driver(assignment_api, domain) def setup_domain_drivers(self, standard_driver, assignment_api): # This is called by the api call wrapper self.configured = True self.driver = standard_driver conf_dir = CONF.identity.domain_config_dir if not os.path.exists(conf_dir): LOG.warning(_('Unable to locate domain config directory: %s'), conf_dir) return for r, d, f in os.walk(conf_dir): for fname in f: if (fname.startswith(DOMAIN_CONF_FHEAD) and fname.endswith(DOMAIN_CONF_FTAIL)): if fname.count('.') >= 2: self._load_config(assignment_api, [os.path.join(r, fname)], fname[len(DOMAIN_CONF_FHEAD): -len(DOMAIN_CONF_FTAIL)]) else: LOG.debug(('Ignoring file (%s) while scanning domain ' 'config directory'), fname) def get_domain_driver(self, domain_id): if domain_id in self: return self[domain_id]['driver'] def get_domain_conf(self, domain_id): if domain_id in self: return self[domain_id]['cfg'] def reload_domain_driver(self, assignment_api, domain_id): # Only used to support unit tests that want to set # new config values. This should only be called once # the domains have been configured, since it relies on # the fact that the configuration files have already been # read. if self.configured: if domain_id in self: self._load_driver(assignment_api, domain_id) else: # The standard driver self.driver = self.driver() self.driver.assignment_api = assignment_api def domains_configured(f): """Wraps API calls to lazy load domain configs after init. This is required since the assignment manager needs to be initialized before this manager, and yet this manager's init wants to be able to make assignment calls (to build the domain configs). So instead, we check if the domains have been initialized on entry to each call, and if requires load them, """ @functools.wraps(f) def wrapper(self, *args, **kwargs): if (not self.domain_configs.configured and CONF.identity.domain_specific_drivers_enabled): self.domain_configs.setup_domain_drivers( self.driver, self.assignment_api) return f(self, *args, **kwargs) return wrapper def exception_translated(exception_type): """Wraps API calls to map to correct exception.""" def _exception_translated(f): @functools.wraps(f) def wrapper(self, *args, **kwargs): try: return f(self, *args, **kwargs) except exception.PublicIDNotFound as e: if exception_type == 'user': raise exception.UserNotFound(user_id=e.message) elif exception_type == 'group': raise exception.GroupNotFound(group_id=e.message) elif exception_type == 'assertion': raise AssertionError(_('Invalid user / password')) else: raise return wrapper return _exception_translated @dependency.provider('identity_api') @dependency.optional('revoke_api') @dependency.requires('assignment_api', 'credential_api', 'id_mapping_api', 'token_api') class Manager(manager.Manager): """Default pivot point for the Identity backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. This class also handles the support of domain specific backends, by using the DomainConfigs class. 
The setup call for DomainConfigs is called from with the @domains_configured wrapper in a lazy loading fashion to get around the fact that we can't satisfy the assignment api it needs from within our __init__() function since the assignment driver is not itself yet initialized. Each of the identity calls are pre-processed here to choose, based on domain, which of the drivers should be called. The non-domain-specific driver is still in place, and is used if there is no specific driver for the domain in question (or we are not using multiple domain drivers). Starting with Juno, in order to be able to obtain the domain from just an ID being presented as part of an API call, a public ID to domain and local ID mapping is maintained. This mapping also allows for the local ID of drivers that do not provide simple UUIDs (such as LDAP) to be referenced via a public facing ID. The mapping itself is automatically generated as entities are accessed via the driver. This mapping is only used when: - the entity is being handled by anything other than the default driver, or - the entity is being handled by the default LDAP driver and backward compatible IDs are not required. This means that in the standard case of a single SQL backend or the default settings of a single LDAP backend (since backward compatible IDs is set to True by default), no mapping is used. An alternative approach would be to always use the mapping table, but in the cases where we don't need it to make the public and local IDs the same. It is felt that not using the mapping by default is a more prudent way to introduce this functionality. """ _USER = 'user' _GROUP = 'group' def __init__(self): super(Manager, self).__init__(CONF.identity.driver) self.domain_configs = DomainConfigs() # Domain ID normalization methods def _set_domain_id_and_mapping(self, ref, domain_id, driver, entity_type): """Patch the domain_id/public_id into the resulting entity(ies). :param ref: the entity or list of entities to post process :param domain_id: the domain scope used for the call :param driver: the driver used to execute the call :param entity_type: whether this is a user or group :returns: post processed entity or list or entities Called to post-process the entity being returned, using a mapping to substitute a public facing ID as necessary. This method must take into account: - If the driver is not domain aware, then we must set the domain attribute of all entities irrespective of mapping. - If the driver does not support UUIDs, then we always want to provide a mapping, except for the special case of this being the default driver and backward_compatible_ids is set to True. This is to ensure that entity IDs do not change for an existing LDAP installation (only single domain/driver LDAP configurations were previously supported). - If the driver does support UUIDs, then we always create a mapping entry, but use the local UUID as the public ID. The exception to - this is that if we just have single driver (i.e. not using specific multi-domain configs), then we don't both with the mapping at all. """ conf = CONF.identity if (driver is self.driver and driver.generates_uuids() and driver.is_domain_aware()): # The default driver that needs no help, e.g. 
SQL return ref LOG.debug('ID Mapping - Domain ID: %(domain)s, ' 'Default Driver: %(driver)s, ' 'Domains: %(aware)s, UUIDs: %(generate)s, ' 'Compatible IDs: %(compat)s', {'domain': domain_id, 'driver': (driver == self.driver), 'aware': driver.is_domain_aware(), 'generate': driver.generates_uuids(), 'compat': CONF.identity_mapping.backward_compatible_ids}) if isinstance(ref, dict): LOG.debug('Local ID: %s', ref['id']) ref = ref.copy() # If the driver can't handle domains, then we need to insert the # domain_id into the entity being returned. If the domain_id is # None that means we are running in a single backend mode, so to # remain backwardly compatible, we put in the default domain ID. if not driver.is_domain_aware(): if domain_id is None: domain_id = conf.default_domain_id ref['domain_id'] = domain_id # There are two situations where we must now use the mapping: # - this isn't the default driver (i.e. multiple backends), or # - we have a single backend that doesn't use UUIDs # The exception to the above is that we must honor backward # compatibility if this is the default driver (e.g. to support # current LDAP) if (driver is not self.driver or (not driver.generates_uuids() and not CONF.identity_mapping.backward_compatible_ids)): local_entity = {'domain_id': ref['domain_id'], 'local_id': ref['id'], 'entity_type': entity_type} public_id = self.id_mapping_api.get_public_id(local_entity) if public_id: ref['id'] = public_id LOG.debug('Found existing mapping to public ID: %s', ref['id']) else: # Need to create a mapping. If the driver generates UUIDs # then pass the local UUID in as the public ID to use. if driver.generates_uuids(): public_id = ref['id'] ref['id'] = self.id_mapping_api.create_id_mapping( local_entity, public_id) LOG.debug('Created new mapping to public ID: %s', ref['id']) return ref elif isinstance(ref, list): return [self._set_domain_id_and_mapping( x, domain_id, driver, entity_type) for x in ref] else: raise ValueError(_('Expected dict or list: %s') % type(ref)) def _clear_domain_id_if_domain_unaware(self, driver, ref): """Clear domain_id details if driver is not domain aware.""" if not driver.is_domain_aware() and 'domain_id' in ref: ref = ref.copy() ref.pop('domain_id') return ref def _select_identity_driver(self, domain_id): """Choose a backend driver for the given domain_id. :param domain_id: The domain_id for which we want to find a driver. If the domain_id is specified as None, then this means we need a driver that handles multiple domains. :returns: chosen backend driver If there is a specific driver defined for this domain then choose it. If the domain is None, or there no specific backend for the given domain is found, then we chose the default driver. """ if domain_id is None: driver = self.driver else: driver = (self.domain_configs.get_domain_driver(domain_id) or self.driver) # If the driver is not domain aware (e.g. LDAP) then check to # ensure we are not mapping multiple domains onto it - the only way # that would happen is that the default driver is LDAP and the # domain is anything other than None or the default domain. if (not driver.is_domain_aware() and driver == self.driver and domain_id != CONF.identity.default_domain_id and domain_id is not None): LOG.warning('Found multiple domains being mapped to a ' 'driver that does not support that (e.g. 
' 'LDAP) - Domain ID: %(domain)s, ' 'Default Driver: %(driver)s', {'domain': domain_id, 'driver': (driver == self.driver)}) raise exception.DomainNotFound(domain_id=domain_id) return driver def _get_domain_driver_and_entity_id(self, public_id): """Look up details using the public ID. :param public_id: the ID provided in the call :returns: domain_id, which can be None to indicate that the driver in question supports multiple domains driver selected based on this domain entity_id which will is understood by the driver. Use the mapping table to look up the domain, driver and local entity that is represented by the provided public ID. Handle the situations were we do not use the mapping (e.g. single driver that understands UUIDs etc.) """ conf = CONF.identity # First, since we don't know anything about the entity yet, we must # assume it needs mapping, so long as we are using domain specific # drivers. if conf.domain_specific_drivers_enabled: local_id_ref = self.id_mapping_api.get_id_mapping(public_id) if local_id_ref: return ( local_id_ref['domain_id'], self._select_identity_driver(local_id_ref['domain_id']), local_id_ref['local_id']) # So either we are using multiple drivers but the public ID is invalid # (and hence was not found in the mapping table), or the public ID is # being handled by the default driver. Either way, the only place left # to look is in that standard driver. However, we don't yet know if # this driver also needs mapping (e.g. LDAP in non backward # compatibility mode). driver = self.driver if driver.generates_uuids(): if driver.is_domain_aware: # No mapping required, and the driver can handle the domain # information itself. The classic case of this is the # current SQL driver. return (None, driver, public_id) else: # Although we don't have any drivers of this type, i.e. that # understand UUIDs but not domains, conceptually you could. return (conf.default_domain_id, driver, public_id) # So the only place left to find the ID is in the default driver which # we now know doesn't generate UUIDs if not CONF.identity_mapping.backward_compatible_ids: # We are not running in backward compatibility mode, so we # must use a mapping. local_id_ref = self.id_mapping_api.get_id_mapping(public_id) if local_id_ref: return ( local_id_ref['domain_id'], driver, local_id_ref['local_id']) else: raise exception.PublicIDNotFound(id=public_id) # If we reach here, this means that the default driver # requires no mapping - but also doesn't understand domains # (e.g. the classic single LDAP driver situation). Hence we pass # back the public_ID unmodified and use the default domain (to # keep backwards compatibility with existing installations). # # It is still possible that the public ID is just invalid in # which case we leave this to the caller to check. return (conf.default_domain_id, driver, public_id) def _assert_user_and_group_in_same_backend( self, user_entity_id, user_driver, group_entity_id, group_driver): """Ensures that user and group IDs are backed by the same backend. Raise a CrossBackendNotAllowed exception if they are not from the same backend, otherwise return None. """ if user_driver is not group_driver: # Determine first if either IDs don't exist by calling # the driver.get methods (which will raise a NotFound # exception). user_driver.get_user(user_entity_id) group_driver.get_group(group_entity_id) # If we get here, then someone is attempting to create a cross # backend membership, which is not allowed. 
raise exception.CrossBackendNotAllowed(group_id=group_entity_id, user_id=user_entity_id) def _mark_domain_id_filter_satisfied(self, hints): if hints: for filter in hints.filters: if (filter['name'] == 'domain_id' and filter['comparator'] == 'equals'): hints.filters.remove(filter) def _ensure_domain_id_in_hints(self, hints, domain_id): if (domain_id is not None and not hints.get_exact_filter_by_name('domain_id')): hints.add_filter('domain_id', domain_id) # The actual driver calls - these are pre/post processed here as # part of the Manager layer to make sure we: # # - select the right driver for this domain # - clear/set domain_ids for drivers that do not support domains # - create any ID mapping that might be required @notifications.emit_event('authenticate') @domains_configured @exception_translated('assertion') def authenticate(self, context, user_id, password): domain_id, driver, entity_id = ( self._get_domain_driver_and_entity_id(user_id)) ref = driver.authenticate(entity_id, password) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.USER) @notifications.created(_USER, result_id_arg_attr='id') @domains_configured @exception_translated('user') def create_user(self, user_ref): user = user_ref.copy() user['name'] = clean.user_name(user['name']) user.setdefault('enabled', True) user['enabled'] = clean.user_enabled(user['enabled']) domain_id = user['domain_id'] self.assignment_api.get_domain(domain_id) # For creating a user, the domain is in the object itself domain_id = user_ref['domain_id'] driver = self._select_identity_driver(domain_id) user = self._clear_domain_id_if_domain_unaware(driver, user) # Generate a local ID - in the future this might become a function of # the underlying driver so that it could conform to rules set down by # that particular driver type. user['id'] = uuid.uuid4().hex ref = driver.create_user(user['id'], user) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.USER) @domains_configured @exception_translated('user') def get_user(self, user_id): domain_id, driver, entity_id = ( self._get_domain_driver_and_entity_id(user_id)) ref = driver.get_user(entity_id) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.USER) def assert_user_enabled(self, user_id, user=None): """Assert the user and the user's domain are enabled. :raise AssertionError if the user or the user's domain is disabled. """ if user is None: user = self.get_user(user_id) self.assignment_api.assert_domain_enabled(user['domain_id']) if not user.get('enabled', True): raise AssertionError(_('User is disabled: %s') % user_id) @domains_configured @exception_translated('user') def get_user_by_name(self, user_name, domain_id): driver = self._select_identity_driver(domain_id) ref = driver.get_user_by_name(user_name, domain_id) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.USER) @manager.response_truncated @domains_configured @exception_translated('user') def list_users(self, domain_scope=None, hints=None): driver = self._select_identity_driver(domain_scope) hints = hints or driver_hints.Hints() if driver.is_domain_aware(): # Force the domain_scope into the hint to ensure that we only get # back domains for that scope. self._ensure_domain_id_in_hints(hints, domain_scope) else: # We are effectively satisfying any domain_id filter by the above # driver selection, so remove any such filter. 
self._mark_domain_id_filter_satisfied(hints) ref_list = driver.list_users(hints) return self._set_domain_id_and_mapping( ref_list, domain_scope, driver, mapping.EntityType.USER) @notifications.updated(_USER) @domains_configured @exception_translated('user') def update_user(self, user_id, user_ref): user = user_ref.copy() if 'name' in user: user['name'] = clean.user_name(user['name']) if 'enabled' in user: user['enabled'] = clean.user_enabled(user['enabled']) if 'domain_id' in user: self.assignment_api.get_domain(user['domain_id']) if 'id' in user: if user_id != user['id']: raise exception.ValidationError(_('Cannot change user ID')) # Since any ID in the user dict is now irrelevant, remove its so as # the driver layer won't be confused by the fact the this is the # public ID not the local ID user.pop('id') domain_id, driver, entity_id = ( self._get_domain_driver_and_entity_id(user_id)) user = self._clear_domain_id_if_domain_unaware(driver, user) ref = driver.update_user(entity_id, user) if user.get('enabled') is False or user.get('password') is not None: if self.revoke_api: self.revoke_api.revoke_by_user(user_id) self.token_api.delete_tokens_for_user(user_id) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.USER) @notifications.deleted(_USER) @domains_configured @exception_translated('user') def delete_user(self, user_id): domain_id, driver, entity_id = ( self._get_domain_driver_and_entity_id(user_id)) driver.delete_user(entity_id) self.credential_api.delete_credentials_for_user(user_id) self.token_api.delete_tokens_for_user(user_id) self.id_mapping_api.delete_id_mapping(user_id) @notifications.created(_GROUP, result_id_arg_attr='id') @domains_configured @exception_translated('group') def create_group(self, group_ref): group = group_ref.copy() group.setdefault('description', '') domain_id = group['domain_id'] self.assignment_api.get_domain(domain_id) # For creating a group, the domain is in the object itself domain_id = group_ref['domain_id'] driver = self._select_identity_driver(domain_id) group = self._clear_domain_id_if_domain_unaware(driver, group) # Generate a local ID - in the future this might become a function of # the underlying driver so that it could conform to rules set down by # that particular driver type. group['id'] = uuid.uuid4().hex ref = driver.create_group(group['id'], group) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.GROUP) @domains_configured @exception_translated('group') def get_group(self, group_id): domain_id, driver, entity_id = ( self._get_domain_driver_and_entity_id(group_id)) ref = driver.get_group(entity_id) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.GROUP) @notifications.updated(_GROUP) @domains_configured @exception_translated('group') def update_group(self, group_id, group): if 'domain_id' in group: self.assignment_api.get_domain(group['domain_id']) domain_id, driver, entity_id = ( self._get_domain_driver_and_entity_id(group_id)) group = self._clear_domain_id_if_domain_unaware(driver, group) ref = driver.update_group(entity_id, group) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.GROUP) def revoke_tokens_for_group(self, group_id): # We get the list of users before we attempt the group # deletion, so that we can remove these tokens after we know # the group deletion succeeded. 
# TODO(ayoung): revoke based on group and roleids instead user_ids = [] for u in self.list_users_in_group(group_id): user_ids.append(u['id']) if self.revoke_api: self.revoke_api.revoke_by_user(u['id']) self.token_api.delete_tokens_for_users(user_ids) @notifications.deleted(_GROUP) @domains_configured @exception_translated('group') def delete_group(self, group_id): domain_id, driver, entity_id = ( self._get_domain_driver_and_entity_id(group_id)) # As well as deleting the group, we need to invalidate # any tokens for the users who are members of the group. self.revoke_tokens_for_group(group_id) driver.delete_group(entity_id) self.id_mapping_api.delete_id_mapping(group_id) @domains_configured @exception_translated('group') def add_user_to_group(self, user_id, group_id): @exception_translated('user') def get_entity_info_for_user(public_id): return self._get_domain_driver_and_entity_id(public_id) _domain_id, group_driver, group_entity_id = ( self._get_domain_driver_and_entity_id(group_id)) # Get the same info for the user_id, taking care to map any # exceptions correctly _domain_id, user_driver, user_entity_id = ( get_entity_info_for_user(user_id)) self._assert_user_and_group_in_same_backend( user_entity_id, user_driver, group_entity_id, group_driver) group_driver.add_user_to_group(user_entity_id, group_entity_id) self.token_api.delete_tokens_for_user(user_id) @domains_configured @exception_translated('group') def remove_user_from_group(self, user_id, group_id): @exception_translated('user') def get_entity_info_for_user(public_id): return self._get_domain_driver_and_entity_id(public_id) _domain_id, group_driver, group_entity_id = ( self._get_domain_driver_and_entity_id(group_id)) # Get the same info for the user_id, taking care to map any # exceptions correctly _domain_id, user_driver, user_entity_id = ( get_entity_info_for_user(user_id)) self._assert_user_and_group_in_same_backend( user_entity_id, user_driver, group_entity_id, group_driver) group_driver.remove_user_from_group(user_entity_id, group_entity_id) # TODO(ayoung) revoking all tokens for a user based on group # membership is overkill, as we only would need to revoke tokens # that had role assignments via the group. Calculating those # assignments would have to be done by the assignment backend. if self.revoke_api: self.revoke_api.revoke_by_user(user_id) self.token_api.delete_tokens_for_user(user_id) @manager.response_truncated @domains_configured @exception_translated('user') def list_groups_for_user(self, user_id, hints=None): domain_id, driver, entity_id = ( self._get_domain_driver_and_entity_id(user_id)) hints = hints or driver_hints.Hints() if not driver.is_domain_aware(): # We are effectively satisfying any domain_id filter by the above # driver selection, so remove any such filter self._mark_domain_id_filter_satisfied(hints) ref_list = driver.list_groups_for_user(entity_id, hints) return self._set_domain_id_and_mapping( ref_list, domain_id, driver, mapping.EntityType.GROUP) @manager.response_truncated @domains_configured @exception_translated('group') def list_groups(self, domain_scope=None, hints=None): driver = self._select_identity_driver(domain_scope) hints = hints or driver_hints.Hints() if driver.is_domain_aware(): # Force the domain_scope into the hint to ensure that we only get # back domains for that scope. self._ensure_domain_id_in_hints(hints, domain_scope) else: # We are effectively satisfying any domain_id filter by the above # driver selection, so remove any such filter. 
self._mark_domain_id_filter_satisfied(hints) ref_list = driver.list_groups(hints) return self._set_domain_id_and_mapping( ref_list, domain_scope, driver, mapping.EntityType.GROUP) @manager.response_truncated @domains_configured @exception_translated('group') def list_users_in_group(self, group_id, hints=None): domain_id, driver, entity_id = ( self._get_domain_driver_and_entity_id(group_id)) hints = hints or driver_hints.Hints() if not driver.is_domain_aware(): # We are effectively satisfying any domain_id filter by the above # driver selection, so remove any such filter self._mark_domain_id_filter_satisfied(hints) ref_list = driver.list_users_in_group(entity_id, hints) return self._set_domain_id_and_mapping( ref_list, domain_id, driver, mapping.EntityType.USER) @domains_configured @exception_translated('group') def check_user_in_group(self, user_id, group_id): @exception_translated('user') def get_entity_info_for_user(public_id): return self._get_domain_driver_and_entity_id(public_id) _domain_id, group_driver, group_entity_id = ( self._get_domain_driver_and_entity_id(group_id)) # Get the same info for the user_id, taking care to map any # exceptions correctly _domain_id, user_driver, user_entity_id = ( get_entity_info_for_user(user_id)) self._assert_user_and_group_in_same_backend( user_entity_id, user_driver, group_entity_id, group_driver) return group_driver.check_user_in_group(user_entity_id, group_entity_id) @domains_configured def change_password(self, context, user_id, original_password, new_password): # authenticate() will raise an AssertionError if authentication fails self.authenticate(context, user_id, original_password) update_dict = {'password': new_password} self.update_user(user_id, update_dict) @six.add_metaclass(abc.ABCMeta) class Driver(object): """Interface description for an Identity driver.""" def _get_list_limit(self): return CONF.identity.list_limit or CONF.list_limit def is_domain_aware(self): """Indicates if Driver supports domains.""" return True def generates_uuids(self): """Indicates if Driver generates UUIDs as the local entity ID.""" return True @abc.abstractmethod def authenticate(self, user_id, password): """Authenticate a given user and password. :returns: user_ref :raises: AssertionError """ raise exception.NotImplemented() # pragma: no cover # user crud @abc.abstractmethod def create_user(self, user_id, user): """Creates a new user. :raises: keystone.exception.Conflict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_users(self, hints): """List users in the system. :param hints: filter hints which the driver should implement if at all possible. :returns: a list of user_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_users_in_group(self, group_id, hints): """List users in a group. :param group_id: the group in question :param hints: filter hints which the driver should implement if at all possible. :returns: a list of user_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_user(self, user_id): """Get a user by ID. :returns: user_ref :raises: keystone.exception.UserNotFound """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_user(self, user_id, user): """Updates an existing user. 
:raises: keystone.exception.UserNotFound, keystone.exception.Conflict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def add_user_to_group(self, user_id, group_id): """Adds a user to a group. :raises: keystone.exception.UserNotFound, keystone.exception.GroupNotFound """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def check_user_in_group(self, user_id, group_id): """Checks if a user is a member of a group. :raises: keystone.exception.UserNotFound, keystone.exception.GroupNotFound """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def remove_user_from_group(self, user_id, group_id): """Removes a user from a group. :raises: keystone.exception.NotFound """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_user(self, user_id): """Deletes an existing user. :raises: keystone.exception.UserNotFound """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_user_by_name(self, user_name, domain_id): """Get a user by name. :returns: user_ref :raises: keystone.exception.UserNotFound """ raise exception.NotImplemented() # pragma: no cover # group crud @abc.abstractmethod def create_group(self, group_id, group): """Creates a new group. :raises: keystone.exception.Conflict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_groups(self, hints): """List groups in the system. :param hints: filter hints which the driver should implement if at all possible. :returns: a list of group_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_groups_for_user(self, user_id, hints): """List groups a user is in :param user_id: the user in question :param hints: filter hints which the driver should implement if at all possible. :returns: a list of group_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_group(self, group_id): """Get a group by ID. :returns: group_ref :raises: keystone.exception.GroupNotFound """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_group(self, group_id, group): """Updates an existing group. :raises: keystone.exceptionGroupNotFound, keystone.exception.Conflict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_group(self, group_id): """Deletes an existing group. :raises: keystone.exception.GroupNotFound """ raise exception.NotImplemented() # pragma: no cover # end of identity @dependency.provider('id_mapping_api') class MappingManager(manager.Manager): """Default pivot point for the ID Mapping backend.""" def __init__(self): super(MappingManager, self).__init__(CONF.identity_mapping.driver) @six.add_metaclass(abc.ABCMeta) class MappingDriver(object): """Interface description for an ID Mapping driver.""" @abc.abstractmethod def get_public_id(self, local_entity): """Returns the public ID for the given local entity. :param dict local_entity: Containing the entity domain, local ID and type ('user' or 'group'). :returns: public ID, or None if no mapping is found. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_id_mapping(self, public_id): """Returns the local mapping. :param public_id: The public ID for the mapping required. :returns dict: Containing the entity domain, local ID and type. If no mapping is found, it returns None. 
""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_id_mapping(self, local_entity, public_id=None): """Create and store a mapping to a public_id. :param dict local_entity: Containing the entity domain, local ID and type ('user' or 'group'). :param public_id: If specified, this will be the public ID. If this is not specified, a public ID will be generated. :returns: public ID """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_id_mapping(self, public_id): """Deletes an entry for the given public_id. :param public_id: The public ID for the mapping to be deleted. The method is silent if no mapping is found. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def purge_mappings(self, purge_filter): """Purge selected identity mappings. :param dict purge_filter: Containing the attributes of the filter that defines which entries to purge. An empty filter means purge all mappings. """ raise exception.NotImplemented() # pragma: no cover<|fim▁end|>
<|file_name|>signal-exit-status.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // ignore-windows #![feature(old_io)] #![feature(os)] use std::env; use std::old_io::process::{Command, ExitSignal, ExitStatus};<|fim▁hole|> // Raise a segfault. unsafe { *(0 as *mut isize) = 0; } } else { let status = Command::new(&args[0]).arg("signal").status().unwrap(); // Windows does not have signal, so we get exit status 0xC0000028 (STATUS_BAD_STACK). match status { ExitSignal(_) if cfg!(unix) => {}, ExitStatus(0xC0000028) if cfg!(windows) => {}, _ => panic!("invalid termination (was not signalled): {}", status) } } }<|fim▁end|>
pub fn main() { let args: Vec<String> = env::args().collect(); if args.len() >= 2 && args[1] == "signal" {
<|file_name|>read_rgb.py<|end_file_name|><|fim▁begin|>import json from PIL import Image import collections with open('../config/nodes.json') as data_file: nodes = json.load(data_file) # empty fucker<|fim▁hole|>ordered_nodes = [None] * len(nodes) # populate fucker for i, pos in nodes.items(): ordered_nodes[int(i)] = [pos['x'], pos['y']] filename = "04_rgb_vertical_lines" im = Image.open("../gif_generators/output/"+filename+".gif") #Can be many different formats. target_size = 400, 400 resize = False if target_size != im.size: resize = True data = [] # To iterate through the entire gif try: frame_num = 0 while True: im.seek(frame_num) frame_data = [] # do something to im img = im.convert('RGB') if resize == True: print "Resizing" img.thumbnail(target_size, Image.ANTIALIAS) for x, y in ordered_nodes: frame_data.append(img.getpixel((x, y))) #print r, g, b data.append(frame_data) # write to json print frame_num frame_num+=1 except EOFError: pass # end of sequence #print data #print r, g, b with open(filename+'.json', 'w') as outfile: json.dump({ "meta": {}, "data": data }, outfile) print im.size #Get the width and hight of the image for iterating over #print pix[,y] #Get the RGBA Value of the a pixel of an image<|fim▁end|>
<|file_name|>DoiClientTest.java<|end_file_name|><|fim▁begin|>/******************************************************************************* * Australian National University Data Commons * Copyright (C) 2013 The Australian National University * * This file is part of Australian National University Data Commons. * * Australian National University Data Commons is free software: you * can redistribute it and/or modify it under the terms of the GNU * General Public License as published by the Free Software Foundation, * either version 3 of the License, or (at your option) any later * version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ package au.edu.anu.datacommons.doi; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.StringWriter; import java.net.URI; import javax.ws.rs.core.UriBuilder; import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBException; import javax.xml.bind.Marshaller; import javax.xml.bind.Unmarshaller; import org.datacite.schema.kernel_4.Resource; import org.datacite.schema.kernel_4.Resource.Creators; import org.datacite.schema.kernel_4.Resource.Creators.Creator; import org.datacite.schema.kernel_4.Resource.Creators.Creator.CreatorName; import org.datacite.schema.kernel_4.Resource.Identifier; import org.datacite.schema.kernel_4.Resource.Titles; import org.datacite.schema.kernel_4.Resource.Titles.Title; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Ignore; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.sun.jersey.test.framework.JerseyTest; public class DoiClientTest extends JerseyTest { private static final Logger LOGGER = LoggerFactory.getLogger(DoiClientTest.class); private DoiClient doiClient; private String sampleDoi = "10.5072/13/50639BFE25F18"; private static JAXBContext context; private Marshaller marshaller; private Unmarshaller unmarshaller; public DoiClientTest() { super("au.edu.anu.datacommons.doi"); // LOGGER.trace("In Constructor"); // WebResource webResource = resource(); // DoiConfig doiConfig = new DoiConfigImpl(webResource.getURI().toString(), appId); // doiClient = new DoiClient(doiConfig); doiClient = new DoiClient(); } @BeforeClass public static void setUpBeforeClass() throws Exception { context = JAXBContext.newInstance(Resource.class); } @AfterClass public static void tearDownAfterClass() throws Exception { } @Before public void setUp() throws Exception { super.setUp(); marshaller = context.createMarshaller(); marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true); marshaller.setProperty(Marshaller.JAXB_SCHEMA_LOCATION, "http://datacite.org/schema/kernel-2.2 http://schema.datacite.org/meta/kernel-2.2/metadata.xsd"); } @After public void tearDown() throws Exception { super.tearDown(); } @Ignore public void testMint() { try { doiClient.mint("https://datacommons.anu.edu.au:8443/DataCommons/item/anudc:3320", generateSampleResource()); String respStr = doiClient.getDoiResponseAsString(); LOGGER.trace(respStr); } catch (Exception e) { failOnException(e); } } @Ignore 
public void testUpdate() { try { Resource res = new Resource(); Creators creators = new Creators(); Creator creator = new Creator(); CreatorName creatorName = new CreatorName(); creatorName.setValue("Creator 1"); creator.setCreatorName(creatorName); creators.getCreator().add(creator); res.setCreators(creators); Titles titles = new Titles(); Title title = new Title(); title.setValue("Title 1"); titles.getTitle().add(title); res.setTitles(titles); res.setPublisher("Publisher 1"); res.setPublicationYear("1987"); Identifier id = new Identifier(); id.setValue(sampleDoi); id.setIdentifierType("DOI"); res.setIdentifier(id); doiClient.update(sampleDoi, null, res); Resource newRes = doiClient.getMetadata(sampleDoi); String resAsStr = getResourceAsString(newRes); LOGGER.trace(resAsStr); } catch (Exception e) { failOnException(e); } } @Ignore public void testDeactivate() { try { doiClient.deactivate(sampleDoi); assertTrue(doiClient.getDoiResponseAsString().indexOf("AbsolutePath:" + resource().getURI().toString() + "deactivate.xml/") != -1); // assertTrue(doiClient.getDoiResponseAsString().indexOf("QueryParam:app_id=TEST" + appId) != -1); assertTrue(doiClient.getDoiResponseAsString().indexOf("QueryParam:doi=" + sampleDoi) != -1); } catch (Exception e) { failOnException(e); } } @Ignore public void testActivate() { try { doiClient.activate(sampleDoi); assertTrue(doiClient.getDoiResponseAsString().indexOf("AbsolutePath:" + resource().getURI().toString() + "activate.xml/") != -1); // assertTrue(doiClient.getDoiResponseAsString().indexOf("QueryParam:app_id=TEST" + appId) != -1); assertTrue(doiClient.getDoiResponseAsString().indexOf("QueryParam:doi=" + sampleDoi) != -1); } catch (Exception e)<|fim▁hole|> } @Test public void testGetDoiMetaData() { try { Resource res = doiClient.getMetadata(sampleDoi); StringWriter strW = new StringWriter(); marshaller.marshal(res, strW); // assertTrue(doiClient.getDoiResponseAsString().indexOf("AbsolutePath:" + resource().getURI().toString() + "xml.xml/") != -1); // assertTrue(doiClient.getDoiResponseAsString().indexOf("QueryParam:doi=" + sampleDoi) != -1); } catch (Exception e) { failOnException(e); } } private Resource generateSampleResource() { Resource metadata = new Resource(); Titles titles = new Titles(); Title title1 = new Title(); title1.setValue("Some title without a type"); titles.getTitle().add(title1); metadata.setTitles(titles); Creators creators = new Creators(); metadata.setCreators(creators); Creator creator = new Creator(); CreatorName creatorName = new CreatorName(); creatorName.setValue("Smith, John"); creator.setCreatorName(creatorName); metadata.getCreators().getCreator().add(creator); metadata.setPublisher("Some random publisher"); metadata.setPublicationYear("2010"); return metadata; } private String getResourceAsString(Resource res) throws JAXBException { StringWriter strW = new StringWriter(); marshaller.marshal(res, strW); return strW.toString(); } private void failOnException(Throwable e) { LOGGER.error(e.getMessage(), e); fail(e.getMessage()); } }<|fim▁end|>
{ failOnException(e); }
<|file_name|>tests.rs<|end_file_name|><|fim▁begin|>use super::{Tok, ErrorCode, Error, Tokenizer, CaseInsensitiveUserStr, UserStr}; use super::Tok::*; enum Expectation<'a> { ExpectTok(Tok<'a>), ExpectErr(ErrorCode), } use self::Expectation::*; fn gen_test(input: &str, expected: Vec<(&str, Expectation)>) { // use $ to signal EOL because it can be replaced with a single space // for spans, and because it applies also to r#XXX# style strings: let input = input.replace("$", "\n"); let tokenizer = Tokenizer::new(&input); let len = expected.len(); for (token, (expected_span, expectation)) in tokenizer.zip(expected.into_iter()) { let expected_start = expected_span.find("~").unwrap(); let expected_end = expected_span.rfind("~").unwrap() + 1; println!("token: {:?}", token); match expectation { ExpectTok(expected_tok) => { assert_eq!(Ok((expected_start, expected_tok, expected_end)), token); } ExpectErr(expected_ec) => { assert_eq!( Err(Error { location: expected_start, code: expected_ec, }), token ) } } } // The string should end either with a let tokenizer = Tokenizer::new(&input); match tokenizer.skip(len).next() { a @ Some(_) => assert_eq!(Some(Ok((0, EOS, 0))), a), x => assert_eq!(None, x), } } fn test(input: &str, expected: Vec<(&str, Tok)>) { let generic_expected = expected .into_iter() .map(|(span, tok)| (span, ExpectTok(tok))) .collect(); gen_test(input, generic_expected); } fn test_err(input: &str, expected: (&str, ErrorCode)) { let (span, ec) = expected; gen_test(input, vec![(span, ExpectErr(ec))]) } mod fortran_user_string { use super::{CaseInsensitiveUserStr, UserStr}; #[test] fn basic() { assert_eq!( CaseInsensitiveUserStr::new("hello"), CaseInsensitiveUserStr::new("HELLO") ); assert_eq!( CaseInsensitiveUserStr::new("hello"), CaseInsensitiveUserStr::new("Hello") ); } #[test] fn split() { assert_eq!( UserStr::new("hello"), UserStr::new( r"h& &ello", ) ); assert_eq!( CaseInsensitiveUserStr::new("hello"), CaseInsensitiveUserStr::new( r"h& &eLLo", ) ); } }<|fim▁hole|>#[cfg_attr(rustfmt, rustfmt_skip)] mod tokenizer_tests { use super::{test, test_err}; use super::{CaseInsensitiveUserStr, UserStr}; use super::Tok::*; use super::ErrorCode::*; #[test] fn basic() { test("+ $", vec![ ("~ ", Plus), (" ~", EOS) ]); } #[test] fn error() { test_err(".NO", ("~ ", UnterminatedOperator) ); } #[test] fn operators() { test(".AND. .OR. .LT. .LE. .GT. .GE. .CUSTOM.", vec![ ("~~~~~ ", And), (" ~~~~ ", Or), (" ~~~~ ", LessThan), (" ~~~~ ", LessThanOrEquals), (" ~~~~ ", GreaterThan), (" ~~~~ ", GreaterThanOrEquals), (" ~~~~~~~~", DefinedOperator(CaseInsensitiveUserStr::new("CUSTOM"))), ]); } #[test] fn operators_lowercase() { test(".and. .or. .lt. .le. .gt. .ge. .custom.", vec![ ("~~~~~ ", And), (" ~~~~ ", Or), (" ~~~~ ", LessThan), (" ~~~~ ", LessThanOrEquals), (" ~~~~ ", GreaterThan), (" ~~~~ ", GreaterThanOrEquals), (" ~~~~~~~~", DefinedOperator(CaseInsensitiveUserStr::new("CUSTOM"))), ]); } #[test] fn operators_camelcase() { test(".And. .Or. .Lt. .Le. .Gt. .Ge. 
.Custom.", vec![ ("~~~~~ ", And), (" ~~~~ ", Or), (" ~~~~ ", LessThan), (" ~~~~ ", LessThanOrEquals), (" ~~~~ ", GreaterThan), (" ~~~~ ", GreaterThanOrEquals), (" ~~~~~~~~", DefinedOperator(CaseInsensitiveUserStr::new("CUSTOM"))), ]); } #[test] fn keywords() { test("PROGRAM END PRINT", vec![ ("~~~~~~~ ", Program), (" ~~~ ", End), (" ~~~~~", Print), ]); } #[test] fn keywords_lowercase() { test("program end print", vec![ ("~~~~~~~ ", Program), (" ~~~ ", End), (" ~~~~~", Print), ]); } #[test] fn keywords_camelcase() { test("Program End Print", vec![ ("~~~~~~~ ", Program), (" ~~~ ", End), (" ~~~~~", Print), ]); } #[test] fn logicals() { test(".TRUE. .FALSE.", vec![ ("~~~~~~ ", True), (" ~~~~~~~", False), ]); } #[test] fn hello_world() { test(r#"PROGRAM hello; print *,"Hello, world!"; END PROGRAM hello"#, vec![ ( "~~~~~~~ ", Program), ( " ~~~~~ ", Id(CaseInsensitiveUserStr::new("hello"))), ( " ~ ", EOS), ( " ~~~~~ ", Print), ( " ~ ", Star), ( " ~ ", Comma), ( " ~~~~~~~~~~~~~~~ ", CharLiteralConstant(UserStr::new("Hello, world!"))), ( " ~ ", EOS), ( " ~~~ ", End), ( " ~~~~~~~ ", Program), ( " ~~~~~", Id(CaseInsensitiveUserStr::new("hello"))), ]); } #[test] fn split_keywords() { test("&$pr&$ &og&$ &ram &$ hel&$ &lo", vec![ (" ~~~~~~~~~~~~~~~~~~~~~ ", Program), (" ~~~~~~~~~~~~", Id(CaseInsensitiveUserStr::new("hello"))), ]); } }<|fim▁end|>
<|file_name|>xparser.py<|end_file_name|><|fim▁begin|># # This is a parser that generates the document tree for you. # # To use this parser, create an instance of XElementParser: # parser = saxexts.make_parser() # xp = XElementParser(parser) # # If you have defined classes in the current environment, you might want ot # pass this environment *to* the parser, so your classes will be created as # tree nodes instead of the default (base) XElement class instances: # # # def MyElementClass1(XElement): ... # def MyElementClass2(XElement): ... # ... # # parser = saxexts.make_parser() # xp = XElementParser(parser, vars()) # # Once your parser is constructed, you can parse one or more documents as # follows: # doc_list = ['f1','f2','f3'] # -or- # doc_list = ['url1','url2','url3'] # # for doc in doc_list: # doc_tree = xp.process(doc) # print doc_tree.toXML() import string import sys import types from xml.sax import saxexts from xml.sax import saxlib from xelement import XElement, XTreeHandler class XElementParser: def __init__(self, outer_env={}, parser=None): if parser == None: self.parser = saxexts.XMLValParserFactory.make_parser() else: self.parser = parser self.parser_error_handler = ErrorPrinter() self.parser.setErrorHandler(self.parser_error_handler) self.xth = XTreeHandler(IgnoreWhiteSpace='yes', RemoveWhiteSpace='yes', CreateElementMap='yes', RequireUserClasses='yes') for x in outer_env.keys(): if type(outer_env[x]) == types.ClassType or isinstance(x, object): self.xth.registerElementClass(outer_env[x], x) self.parser.setDocumentHandler(self.xth) def process(self, document_uri): Ok=None try: self.parser_error_handler.reset() self.parser.parse(document_uri) if self.parser_error_handler.has_errors(): raise "validation failed" return self.xth.getDocument().getChild() except IOError,e: print "\nI/O Error: " + document_uri + ": " + str(e)<|fim▁hole|> except saxlib.SAXException,e: print "\nParse Error: " + document_uri + ": " + str(e) class ErrorPrinter: "A simple class that just prints error messages to standard out." def __init__(self): self.error_count = 0 def reset(self): self.error_count = 0 def has_errors(self): return self.error_count def warning(self, exception): print "Warning: %s %s" % (str(exception), exception.getMessage()) sys.exit(1) def error(self, exception): self.error_count = self.error_count + 1 print "Error: %s %s" % (str(exception), exception.getMessage()) def fatalError(self, exception): self.error_count = self.error_count + 1 print "Fatal Error: %s %s" % (str(exception), exception.getMessage())<|fim▁end|>
<|file_name|>clean.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python import os import sys import shutil import glob<|fim▁hole|>import fnmatch mydir = os.path.dirname(os.path.realpath(__file__)) projectdir = os.path.realpath(mydir + "/../") dirs = ['dist','deb_dist','build','.tox','.eggs','.cache','psicrawler.egg-info'] files = ['.coverage'] def out(msg): sys.stdout.write(msg + "\n") sys.stdout.flush() for d in dirs: try: targetdir = projectdir + "/" + d shutil.rmtree(targetdir) out(targetdir) except OSError: pass for f in files: try: targetfile = projectdir + "/" + f os.remove(targetfile) out(targetfile) except OSError: pass for root, dirnames, filenames in os.walk(projectdir): if root.endswith("__pycache__"): out(root) shutil.rmtree(root) else: for fn in filenames: if fn.endswith('.pyc'): f = os.path.join(root, fn) out(f) os.remove(f)<|fim▁end|>
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # coding: utf-8 import os import sys from setuptools.command.test import test as TestCommand from jsonmodels import __version__, __author__, __email__ from setuptools import setup PROJECT_NAME = 'jsonmodels' if sys.argv[-1] == 'publish': os.system('python setup.py sdist upload') sys.exit() class PyTest(TestCommand): user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")] def initialize_options(self): TestCommand.initialize_options(self) self.pytest_args = ['--cov', PROJECT_NAME]<|fim▁hole|> self.test_suite = True def run_tests(self): import pytest errno = pytest.main(self.pytest_args) sys.exit(errno) # Hacking tests. try: import tests except ImportError: pass else: if 'test' in sys.argv and '--no-lint' in sys.argv: tests.LINT = False del sys.argv[sys.argv.index('--no-lint')] if 'test' in sys.argv and '--spelling' in sys.argv: tests.CHECK_SPELLING = True del sys.argv[sys.argv.index('--spelling')] readme = open('README.rst').read() history = open('HISTORY.rst').read().replace('.. :changelog:', '') setup( name=PROJECT_NAME, version=__version__, description='Models to make easier to deal with structures that' ' are converted to, or read from JSON.', long_description=readme + '\n\n' + history, author=__author__, author_email=__email__, url='https://github.com/beregond/jsonmodels', packages=[ PROJECT_NAME, ], package_dir={PROJECT_NAME: PROJECT_NAME}, include_package_data=True, install_requires=[ 'python-dateutil', 'six', ], license="BSD", zip_safe=False, keywords=PROJECT_NAME, classifiers=[ 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Natural Language :: English', "Programming Language :: Python :: 2", 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', ], cmdclass={ 'test': PyTest, }, )<|fim▁end|>
def finalize_options(self): TestCommand.finalize_options(self) self.test_args = []
<|file_name|>product-detail-content.component.ts<|end_file_name|><|fim▁begin|>import { Component} from '@angular/core'; import {RenderingUIAbstractComponent} from "../../../main/components/rendering-iu/rendering-ui.abstract.component"; @Component({ selector:'gota-home-content', moduleId : module.id || __moduleName,<|fim▁hole|>}) export class ProductDetailContentContentComponent extends RenderingUIAbstractComponent { renderUI() { } }<|fim▁end|>
templateUrl: './product-detail-content.component.html', styleUrls: ['./product-detail-content.component.css']
<|file_name|>1315-Sum of Nodes with Even-Valued Grandparent.py<|end_file_name|><|fim▁begin|># Definition for a binary tree node. class TreeNode: def __init__(self, x): self.val = x self.left = None self.right = None<|fim▁hole|> def sumEvenGrandparent(self, root: TreeNode) -> int: total = 0 def dfs(root, p, gp): if root is None: return nonlocal total if gp and (gp.val & 1) == 0: total += root.val dfs(root.left, root, p) dfs(root.right, root, p) dfs(root, None, None) return total<|fim▁end|>
class Solution:
<|file_name|>private-method.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. struct cat { priv meows : uint, how_hungry : int, } <|fim▁hole|> self.nap(); } } impl cat { fn nap(&mut self) { for _ in range(1u, 10u) { } } } fn cat(in_x : uint, in_y : int) -> cat { cat { meows: in_x, how_hungry: in_y } } pub fn main() { let mut nyan : cat = cat(52u, 99); nyan.play(); }<|fim▁end|>
impl cat { pub fn play(&mut self) { self.meows += 1u;
<|file_name|>test_merge.rs<|end_file_name|><|fim▁begin|>// Copyright 2017 TiKV Project Authors. Licensed under Apache-2.0. use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::*; use std::thread; use std::time::Duration; use grpcio::{ChannelBuilder, Environment}; use kvproto::kvrpcpb::*; use kvproto::raft_serverpb::{PeerState, RaftMessage, RegionLocalState}; use kvproto::tikvpb::TikvClient; use raft::eraftpb::MessageType; use engine_rocks::Compat; use engine_traits::{Peekable, CF_RAFT}; use pd_client::PdClient; use raftstore::store::*; use test_raftstore::*; use tikv_util::config::*; use tikv_util::time::Instant; use tikv_util::HandyRwLock; /// Test if merge is rollback as expected. #[test] fn test_node_merge_rollback() { let mut cluster = new_node_cluster(0, 3); configure_for_merge(&mut cluster); let pd_client = Arc::clone(&cluster.pd_client); pd_client.disable_default_operator(); cluster.run_conf_change(); let region = pd_client.get_region(b"k1").unwrap(); cluster.must_split(&region, b"k2"); let left = pd_client.get_region(b"k1").unwrap(); let right = pd_client.get_region(b"k2").unwrap(); pd_client.must_add_peer(left.get_id(), new_peer(2, 2)); pd_client.must_add_peer(right.get_id(), new_peer(2, 4)); cluster.must_put(b"k1", b"v1"); cluster.must_put(b"k3", b"v3"); let region = pd_client.get_region(b"k1").unwrap(); let target_region = pd_client.get_region(b"k3").unwrap(); let schedule_merge_fp = "on_schedule_merge"; fail::cfg(schedule_merge_fp, "return()").unwrap(); // The call is finished when prepare_merge is applied. cluster.must_try_merge(region.get_id(), target_region.get_id()); // Add a peer to trigger rollback. pd_client.must_add_peer(right.get_id(), new_peer(3, 5)); cluster.must_put(b"k4", b"v4"); must_get_equal(&cluster.get_engine(3), b"k4", b"v4"); let mut region = pd_client.get_region(b"k1").unwrap(); // After split and prepare_merge, version becomes 1 + 2 = 3; assert_eq!(region.get_region_epoch().get_version(), 3); // After ConfChange and prepare_merge, conf version becomes 1 + 2 = 3; assert_eq!(region.get_region_epoch().get_conf_ver(), 3); fail::remove(schedule_merge_fp); // Wait till rollback. cluster.must_put(b"k11", b"v11"); // After rollback, version becomes 3 + 1 = 4; region.mut_region_epoch().set_version(4); for i in 1..3 { must_get_equal(&cluster.get_engine(i), b"k11", b"v11"); let state_key = keys::region_state_key(region.get_id()); let state: RegionLocalState = cluster .get_engine(i) .c() .get_msg_cf(CF_RAFT, &state_key) .unwrap() .unwrap(); assert_eq!(state.get_state(), PeerState::Normal); assert_eq!(*state.get_region(), region); } pd_client.must_remove_peer(right.get_id(), new_peer(3, 5)); fail::cfg(schedule_merge_fp, "return()").unwrap(); let target_region = pd_client.get_region(b"k3").unwrap(); cluster.must_try_merge(region.get_id(), target_region.get_id()); let mut region = pd_client.get_region(b"k1").unwrap(); // Split to trigger rollback. cluster.must_split(&right, b"k3"); fail::remove(schedule_merge_fp); // Wait till rollback. 
cluster.must_put(b"k12", b"v12"); // After premerge and rollback, conf_ver becomes 3 + 1 = 4, version becomes 4 + 2 = 6; region.mut_region_epoch().set_conf_ver(4); region.mut_region_epoch().set_version(6); for i in 1..3 { must_get_equal(&cluster.get_engine(i), b"k12", b"v12"); let state_key = keys::region_state_key(region.get_id()); let state: RegionLocalState = cluster .get_engine(i) .c() .get_msg_cf(CF_RAFT, &state_key) .unwrap() .unwrap(); assert_eq!(state.get_state(), PeerState::Normal); assert_eq!(*state.get_region(), region); } } /// Test if merge is still working when restart a cluster during merge. #[test] fn test_node_merge_restart() { let mut cluster = new_node_cluster(0, 3); configure_for_merge(&mut cluster); cluster.run(); let pd_client = Arc::clone(&cluster.pd_client); let region = pd_client.get_region(b"k1").unwrap(); cluster.must_split(&region, b"k2"); let left = pd_client.get_region(b"k1").unwrap(); let right = pd_client.get_region(b"k2").unwrap(); cluster.must_put(b"k1", b"v1"); cluster.must_put(b"k3", b"v3"); let schedule_merge_fp = "on_schedule_merge"; fail::cfg(schedule_merge_fp, "return()").unwrap(); cluster.must_try_merge(left.get_id(), right.get_id()); let leader = cluster.leader_of_region(left.get_id()).unwrap(); cluster.shutdown(); let engine = cluster.get_engine(leader.get_store_id()); let state_key = keys::region_state_key(left.get_id()); let state: RegionLocalState = engine.c().get_msg_cf(CF_RAFT, &state_key).unwrap().unwrap(); assert_eq!(state.get_state(), PeerState::Merging, "{:?}", state); let state_key = keys::region_state_key(right.get_id()); let state: RegionLocalState = engine.c().get_msg_cf(CF_RAFT, &state_key).unwrap().unwrap(); assert_eq!(state.get_state(), PeerState::Normal, "{:?}", state); fail::remove(schedule_merge_fp); cluster.start().unwrap(); // Wait till merge is finished. pd_client.check_merged_timeout(left.get_id(), Duration::from_secs(5)); cluster.must_put(b"k4", b"v4"); for i in 1..4 { must_get_equal(&cluster.get_engine(i), b"k4", b"v4"); let state_key = keys::region_state_key(left.get_id()); let state: RegionLocalState = cluster .get_engine(i) .c() .get_msg_cf(CF_RAFT, &state_key) .unwrap() .unwrap(); assert_eq!(state.get_state(), PeerState::Tombstone, "{:?}", state); let state_key = keys::region_state_key(right.get_id()); let state: RegionLocalState = cluster .get_engine(i) .c() .get_msg_cf(CF_RAFT, &state_key) .unwrap() .unwrap(); assert_eq!(state.get_state(), PeerState::Normal, "{:?}", state); assert!(state.get_region().get_start_key().is_empty()); assert!(state.get_region().get_end_key().is_empty()); } // Now test if cluster works fine when it crash after merge is applied // but before notifying raftstore thread. 
let region = pd_client.get_region(b"k1").unwrap(); let peer_on_store1 = find_peer(&region, 1).unwrap().to_owned(); cluster.must_transfer_leader(region.get_id(), peer_on_store1); cluster.must_split(&region, b"k2"); let left = pd_client.get_region(b"k1").unwrap(); let right = pd_client.get_region(b"k2").unwrap(); let peer_on_store1 = find_peer(&left, 1).unwrap().to_owned(); cluster.must_transfer_leader(left.get_id(), peer_on_store1); cluster.must_put(b"k11", b"v11"); must_get_equal(&cluster.get_engine(3), b"k11", b"v11"); let skip_destroy_fp = "raft_store_skip_destroy_peer"; fail::cfg(skip_destroy_fp, "return()").unwrap(); cluster.add_send_filter(IsolationFilterFactory::new(3)); pd_client.must_merge(left.get_id(), right.get_id()); let peer = find_peer(&right, 3).unwrap().to_owned(); pd_client.must_remove_peer(right.get_id(), peer); cluster.shutdown(); fail::remove(skip_destroy_fp); cluster.clear_send_filters(); cluster.start().unwrap(); must_get_none(&cluster.get_engine(3), b"k1"); must_get_none(&cluster.get_engine(3), b"k3"); } /// Test if merge is still working when restart a cluster during catching up logs for merge. #[test] fn test_node_merge_catch_up_logs_restart() { let mut cluster = new_node_cluster(0, 3); configure_for_merge(&mut cluster); cluster.run(); cluster.must_put(b"k1", b"v1"); cluster.must_put(b"k3", b"v3"); let pd_client = Arc::clone(&cluster.pd_client); let region = pd_client.get_region(b"k1").unwrap(); let peer_on_store1 = find_peer(&region, 1).unwrap().to_owned(); cluster.must_transfer_leader(region.get_id(), peer_on_store1); cluster.must_split(&region, b"k2"); let left = pd_client.get_region(b"k1").unwrap(); let right = pd_client.get_region(b"k2").unwrap(); // make sure the peer of left region on engine 3 has caught up logs. cluster.must_put(b"k0", b"v0"); must_get_equal(&cluster.get_engine(3), b"k0", b"v0"); cluster.add_send_filter(CloneFilterFactory( RegionPacketFilter::new(left.get_id(), 3) .direction(Direction::Recv) .msg_type(MessageType::MsgAppend), )); cluster.must_put(b"k11", b"v11"); must_get_none(&cluster.get_engine(3), b"k11"); // after source peer is applied but before set it to tombstone fail::cfg("after_handle_catch_up_logs_for_merge_1003", "return()").unwrap(); pd_client.must_merge(left.get_id(), right.get_id()); thread::sleep(Duration::from_millis(100)); cluster.shutdown(); fail::remove("after_handle_catch_up_logs_for_merge_1003"); cluster.start().unwrap(); must_get_equal(&cluster.get_engine(3), b"k11", b"v11"); } /// Test if leader election is working properly when catching up logs for merge. 
#[test] fn test_node_merge_catch_up_logs_leader_election() { let mut cluster = new_node_cluster(0, 3); configure_for_merge(&mut cluster); cluster.cfg.raft_store.raft_base_tick_interval = ReadableDuration::millis(10); cluster.cfg.raft_store.raft_election_timeout_ticks = 25; cluster.cfg.raft_store.raft_log_gc_threshold = 12; cluster.cfg.raft_store.raft_log_gc_count_limit = 12; cluster.cfg.raft_store.raft_log_gc_tick_interval = ReadableDuration::millis(100); cluster.run(); cluster.must_put(b"k1", b"v1"); cluster.must_put(b"k3", b"v3"); let pd_client = Arc::clone(&cluster.pd_client); let region = pd_client.get_region(b"k1").unwrap(); let peer_on_store1 = find_peer(&region, 1).unwrap().to_owned(); cluster.must_transfer_leader(region.get_id(), peer_on_store1); cluster.must_split(&region, b"k2"); let left = pd_client.get_region(b"k1").unwrap(); let right = pd_client.get_region(b"k2").unwrap(); let state1 = cluster.truncated_state(1000, 1); // let the entries committed but not applied fail::cfg("on_handle_apply_1003", "pause").unwrap(); for i in 2..20 { cluster.must_put(format!("k1{}", i).as_bytes(), b"v"); } // wait to trigger compact raft log cluster.wait_log_truncated(1000, 1, state1.get_index() + 1); cluster.add_send_filter(CloneFilterFactory( RegionPacketFilter::new(left.get_id(), 3) .direction(Direction::Recv) .msg_type(MessageType::MsgAppend), )); cluster.must_put(b"k11", b"v11"); must_get_none(&cluster.get_engine(3), b"k11"); // let peer not destroyed before election timeout fail::cfg("before_peer_destroy_1003", "pause").unwrap(); fail::remove("on_handle_apply_1003"); pd_client.must_merge(left.get_id(), right.get_id()); // wait election timeout thread::sleep(Duration::from_millis(500)); fail::remove("before_peer_destroy_1003"); must_get_equal(&cluster.get_engine(3), b"k11", b"v11"); } // Test if merge is working properly if no need to catch up logs, // also there may be a propose of compact log after prepare merge is proposed. #[test] fn test_node_merge_catch_up_logs_no_need() { let mut cluster = new_node_cluster(0, 3); configure_for_merge(&mut cluster); cluster.cfg.raft_store.raft_base_tick_interval = ReadableDuration::millis(10); cluster.cfg.raft_store.raft_election_timeout_ticks = 25; cluster.cfg.raft_store.raft_log_gc_threshold = 12; cluster.cfg.raft_store.raft_log_gc_count_limit = 12; cluster.cfg.raft_store.raft_log_gc_tick_interval = ReadableDuration::millis(100); cluster.run(); cluster.must_put(b"k1", b"v1"); cluster.must_put(b"k3", b"v3"); let pd_client = Arc::clone(&cluster.pd_client); let region = pd_client.get_region(b"k1").unwrap(); let peer_on_store1 = find_peer(&region, 1).unwrap().to_owned(); cluster.must_transfer_leader(region.get_id(), peer_on_store1); cluster.must_split(&region, b"k2"); let left = pd_client.get_region(b"k1").unwrap(); let right = pd_client.get_region(b"k2").unwrap(); // put some keys to trigger compact raft log for i in 2..20 { cluster.must_put(format!("k1{}", i).as_bytes(), b"v"); } // let the peer of left region on store 3 falls behind. cluster.add_send_filter(CloneFilterFactory( RegionPacketFilter::new(left.get_id(), 3) .direction(Direction::Recv) .msg_type(MessageType::MsgAppend), )); // make sure the peer is isolated. cluster.must_put(b"k11", b"v11"); must_get_none(&cluster.get_engine(3), b"k11"); // propose merge but not let apply index make progress. 
fail::cfg("apply_after_prepare_merge", "pause").unwrap(); pd_client.merge_region(left.get_id(), right.get_id()); must_get_none(&cluster.get_engine(3), b"k11"); // wait to trigger compact raft log thread::sleep(Duration::from_millis(100)); // let source region not merged fail::cfg("before_handle_catch_up_logs_for_merge", "pause").unwrap(); fail::cfg("after_handle_catch_up_logs_for_merge", "pause").unwrap(); // due to `before_handle_catch_up_logs_for_merge` failpoint, we already pass `apply_index < catch_up_logs.merge.get_commit()` // so now can let apply index make progress. fail::remove("apply_after_prepare_merge"); // make sure all the logs are committed, including the compact command cluster.clear_send_filters(); thread::sleep(Duration::from_millis(50)); // let merge process continue fail::remove("before_handle_catch_up_logs_for_merge"); fail::remove("after_handle_catch_up_logs_for_merge"); thread::sleep(Duration::from_millis(50)); // the source region should be merged and the peer should be destroyed. assert!(pd_client.check_merged(left.get_id())); must_get_equal(&cluster.get_engine(3), b"k11", b"v11"); cluster.must_region_not_exist(left.get_id(), 3); } /// Test if merging state will be removed after accepting a snapshot. #[test] fn test_node_merge_recover_snapshot() { let mut cluster = new_node_cluster(0, 3); configure_for_merge(&mut cluster); cluster.cfg.raft_store.raft_log_gc_threshold = 12; cluster.cfg.raft_store.raft_log_gc_count_limit = 12; let pd_client = Arc::clone(&cluster.pd_client); pd_client.disable_default_operator(); // Start the cluster and evict the region leader from peer 3. cluster.run(); cluster.must_transfer_leader(1, new_peer(1, 1)); let region = pd_client.get_region(b"k1").unwrap(); cluster.must_split(&region, b"k2"); let left = pd_client.get_region(b"k1").unwrap(); cluster.must_put(b"k1", b"v1"); cluster.must_put(b"k3", b"v3"); let region = pd_client.get_region(b"k3").unwrap(); let target_region = pd_client.get_region(b"k1").unwrap(); let schedule_merge_fp = "on_schedule_merge"; fail::cfg(schedule_merge_fp, "return()").unwrap(); cluster.must_try_merge(region.get_id(), target_region.get_id()); // Remove a peer to trigger rollback. pd_client.must_remove_peer(left.get_id(), left.get_peers()[0].to_owned()); must_get_none(&cluster.get_engine(3), b"k4"); let step_store_3_region_1 = "step_message_3_1"; fail::cfg(step_store_3_region_1, "return()").unwrap(); fail::remove(schedule_merge_fp); for i in 0..100 { cluster.must_put(format!("k4{}", i).as_bytes(), b"v4"); } fail::remove(step_store_3_region_1); must_get_equal(&cluster.get_engine(3), b"k40", b"v4"); cluster.must_transfer_leader(1, new_peer(3, 3)); cluster.must_put(b"k40", b"v5"); } // Test if a merge handled properly when there are two different snapshots of one region arrive // in one raftstore tick. #[test] fn test_node_merge_multiple_snapshots_together() { test_node_merge_multiple_snapshots(true) } // Test if a merge handled properly when there are two different snapshots of one region arrive // in different raftstore tick. 
#[test] fn test_node_merge_multiple_snapshots_not_together() { test_node_merge_multiple_snapshots(false) } fn test_node_merge_multiple_snapshots(together: bool) { let mut cluster = new_node_cluster(0, 3); configure_for_merge(&mut cluster); ignore_merge_target_integrity(&mut cluster); let pd_client = Arc::clone(&cluster.pd_client); pd_client.disable_default_operator(); // make it gc quickly to trigger snapshot easily cluster.cfg.raft_store.raft_log_gc_tick_interval = ReadableDuration::millis(20); cluster.cfg.raft_store.raft_base_tick_interval = ReadableDuration::millis(10); cluster.cfg.raft_store.raft_log_gc_count_limit = 10; cluster.cfg.raft_store.merge_max_log_gap = 9; cluster.run(); cluster.must_put(b"k1", b"v1"); cluster.must_put(b"k3", b"v3"); let region = pd_client.get_region(b"k1").unwrap(); cluster.must_split(&region, b"k2"); let left = pd_client.get_region(b"k1").unwrap(); let right = pd_client.get_region(b"k3").unwrap(); let target_leader = right .get_peers() .iter() .find(|p| p.get_store_id() == 1) .unwrap() .clone(); cluster.must_transfer_leader(right.get_id(), target_leader); let target_leader = left .get_peers() .iter() .find(|p| p.get_store_id() == 2) .unwrap() .clone(); cluster.must_transfer_leader(left.get_id(), target_leader); must_get_equal(&cluster.get_engine(1), b"k3", b"v3"); // So cluster becomes: // left region: 1 2(leader) I 3 // right region: 1(leader) 2 I 3 // I means isolation.(here just means 3 can not receive append log) cluster.add_send_filter(CloneFilterFactory( RegionPacketFilter::new(right.get_id(), 3) .direction(Direction::Recv) .msg_type(MessageType::MsgAppend), )); cluster.add_send_filter(CloneFilterFactory( RegionPacketFilter::new(left.get_id(), 3) .direction(Direction::Recv) .msg_type(MessageType::MsgAppend), )); // Add a collect snapshot filter, it will delay snapshots until have collected multiple snapshots from different peers cluster.sim.wl().add_recv_filter( 3, Box::new(LeadingDuplicatedSnapshotFilter::new( Arc::new(AtomicBool::new(false)), together, )), ); // Write some data to trigger a snapshot of right region. for i in 200..210 { let key = format!("k{}", i); let value = format!("v{}", i); cluster.must_put(key.as_bytes(), value.as_bytes()); } // Wait for snapshot to generate and send thread::sleep(Duration::from_millis(100)); // Merge left and right region, due to isolation, the regions on store 3 are not merged yet. pd_client.must_merge(left.get_id(), right.get_id()); thread::sleep(Duration::from_millis(200)); // Let peer of right region on store 3 to make append response to trigger a new snapshot // one is snapshot before merge, the other is snapshot after merge. // Here blocks raftstore for a while to make it not to apply snapshot and receive new log now. fail::cfg("on_raft_ready", "sleep(100)").unwrap(); cluster.clear_send_filters(); thread::sleep(Duration::from_millis(200)); // Filter message again to make sure peer on store 3 can not catch up CommitMerge log cluster.add_send_filter(CloneFilterFactory( RegionPacketFilter::new(left.get_id(), 3) .direction(Direction::Recv) .msg_type(MessageType::MsgAppend), )); cluster.add_send_filter(CloneFilterFactory( RegionPacketFilter::new(right.get_id(), 3) .direction(Direction::Recv) .msg_type(MessageType::MsgAppend), )); // Cause filter is added again, no need to block raftstore anymore fail::cfg("on_raft_ready", "off").unwrap(); // Wait some time to let already merged peer on store 1 or store 2 to notify // the peer of left region on store 3 is stale. 
thread::sleep(Duration::from_millis(300)); cluster.must_put(b"k9", b"v9"); // let follower can reach the new log, then commit merge cluster.clear_send_filters(); must_get_equal(&cluster.get_engine(3), b"k9", b"v9"); } // Test if compact log is ignored after premerge was applied and restart // I.e. is_merging flag should be set after restart #[test] fn test_node_merge_restart_after_apply_premerge_before_apply_compact_log() { let mut cluster = new_node_cluster(0, 3); configure_for_merge(&mut cluster); cluster.cfg.raft_store.merge_max_log_gap = 10; cluster.cfg.raft_store.raft_log_gc_count_limit = 11; // Rely on this config to trigger a compact log cluster.cfg.raft_store.raft_log_gc_size_limit = ReadableSize(1); cluster.cfg.raft_store.raft_log_gc_tick_interval = ReadableDuration::millis(10); let pd_client = Arc::clone(&cluster.pd_client); pd_client.disable_default_operator(); cluster.run(); // Prevent gc_log_tick to propose a compact log let raft_gc_log_tick_fp = "on_raft_gc_log_tick"; fail::cfg(raft_gc_log_tick_fp, "return()").unwrap(); cluster.must_put(b"k1", b"v1"); cluster.must_put(b"k3", b"v3"); let region = pd_client.get_region(b"k1").unwrap(); cluster.must_split(&region, b"k2"); let left = pd_client.get_region(b"k1").unwrap(); let right = pd_client.get_region(b"k2").unwrap(); let left_peer_1 = find_peer(&left, 1).cloned().unwrap(); cluster.must_transfer_leader(left.get_id(), left_peer_1); // Make log gap between store 1 and store 3, for min_index in preMerge cluster.add_send_filter(IsolationFilterFactory::new(3));<|fim▁hole|> for i in 0..6 { cluster.must_put(format!("k1{}", i).as_bytes(), b"v1"); } // Prevent on_apply_res to update merge_state in Peer // If not, almost everything cannot propose including compact log let on_apply_res_fp = "on_apply_res"; fail::cfg(on_apply_res_fp, "return()").unwrap(); cluster.must_try_merge(left.get_id(), right.get_id()); cluster.clear_send_filters(); // Prevent apply fsm to apply compact log let handle_apply_fp = "on_handle_apply"; fail::cfg(handle_apply_fp, "return()").unwrap(); fail::remove(raft_gc_log_tick_fp); // Wait for compact log to be proposed and committed maybe sleep_ms(30); cluster.shutdown(); fail::remove(handle_apply_fp); fail::remove(on_apply_res_fp); // Prevent sched_merge_tick to propose CommitMerge let schedule_merge_fp = "on_schedule_merge"; fail::cfg(schedule_merge_fp, "return()").unwrap(); cluster.start().unwrap(); let last_index = cluster.raft_local_state(left.get_id(), 1).get_last_index(); // Wait for compact log to apply let timer = Instant::now(); loop { let apply_index = cluster.apply_state(left.get_id(), 1).get_applied_index(); if apply_index >= last_index { break; } if timer.saturating_elapsed() > Duration::from_secs(3) { panic!("logs are not applied after 3 seconds"); } thread::sleep(Duration::from_millis(20)); } // Now schedule merge fail::remove(schedule_merge_fp); pd_client.check_merged_timeout(left.get_id(), Duration::from_secs(5)); cluster.must_put(b"k123", b"v2"); must_get_equal(&cluster.get_engine(3), b"k123", b"v2"); } /// Tests whether stale merge is rollback properly if it merges to the same target region again later. 
#[test] fn test_node_failed_merge_before_succeed_merge() { let mut cluster = new_node_cluster(0, 3); configure_for_merge(&mut cluster); cluster.cfg.raft_store.merge_max_log_gap = 30; cluster.cfg.raft_store.store_batch_system.max_batch_size = Some(1); cluster.cfg.raft_store.store_batch_system.pool_size = 2; let pd_client = Arc::clone(&cluster.pd_client); pd_client.disable_default_operator(); cluster.run(); for i in 0..10 { cluster.must_put(format!("k{}", i).as_bytes(), b"v1"); } let region = pd_client.get_region(b"k1").unwrap(); cluster.must_split(&region, b"k5"); let left = pd_client.get_region(b"k1").unwrap(); let mut right = pd_client.get_region(b"k5").unwrap(); let left_peer_1 = find_peer(&left, 1).cloned().unwrap(); cluster.must_transfer_leader(left.get_id(), left_peer_1); let left_peer_3 = find_peer(&left, 3).cloned().unwrap(); assert_eq!(left_peer_3.get_id(), 1003); // Prevent sched_merge_tick to propose CommitMerge let schedule_merge_fp = "on_schedule_merge"; fail::cfg(schedule_merge_fp, "return").unwrap(); // To minimize peers log gap for merging cluster.must_put(b"k11", b"v2"); must_get_equal(&cluster.get_engine(2), b"k11", b"v2"); must_get_equal(&cluster.get_engine(3), b"k11", b"v2"); // Make peer 1003 can't receive PrepareMerge and RollbackMerge log cluster.add_send_filter(IsolationFilterFactory::new(3)); cluster.must_try_merge(left.get_id(), right.get_id()); // Change right region's epoch to make this merge failed cluster.must_split(&right, b"k8"); fail::remove(schedule_merge_fp); // Wait for left region to rollback merge cluster.must_put(b"k12", b"v2"); // Prevent apply fsm applying the `PrepareMerge` and `RollbackMerge` log after // cleaning send filter. let before_handle_normal_1003_fp = "before_handle_normal_1003"; fail::cfg(before_handle_normal_1003_fp, "return").unwrap(); cluster.clear_send_filters(); right = pd_client.get_region(b"k5").unwrap(); let right_peer_1 = find_peer(&right, 1).cloned().unwrap(); cluster.must_transfer_leader(right.get_id(), right_peer_1); // Add some data for checking data integrity check at a later time for i in 0..5 { cluster.must_put(format!("k2{}", i).as_bytes(), b"v3"); } // Do a really succeed merge pd_client.must_merge(left.get_id(), right.get_id()); // Wait right region to send CatchUpLogs to left region. sleep_ms(100); // After executing CatchUpLogs in source peer fsm, the committed log will send // to apply fsm in the end of this batch. So even the first `on_ready_prepare_merge` // is executed after CatchUplogs, the latter committed logs is still sent to apply fsm // if CatchUpLogs and `on_ready_prepare_merge` is in different batch. // // In this case, the data is complete because the wrong up-to-date msg from the // first `on_ready_prepare_merge` is sent after all committed log. // Sleep a while to wait apply fsm to send `on_ready_prepare_merge` to peer fsm. let after_send_to_apply_1003_fp = "after_send_to_apply_1003"; fail::cfg(after_send_to_apply_1003_fp, "sleep(300)").unwrap(); fail::remove(before_handle_normal_1003_fp); // Wait `after_send_to_apply_1003` timeout sleep_ms(300); fail::remove(after_send_to_apply_1003_fp); // Check the data integrity for i in 0..5 { must_get_equal(&cluster.get_engine(3), format!("k2{}", i).as_bytes(), b"v3"); } } /// Tests whether the source peer is destroyed correctly when transferring leader during committing merge. /// /// In the previous merge flow, target peer deletes meta of source peer without marking it as pending remove. 
/// If source peer becomes leader at the same time, it will panic due to corrupted meta. #[test] fn test_node_merge_transfer_leader() { let mut cluster = new_node_cluster(0, 3); configure_for_merge(&mut cluster); cluster.cfg.raft_store.store_batch_system.max_batch_size = Some(1); cluster.cfg.raft_store.store_batch_system.pool_size = 2; let pd_client = Arc::clone(&cluster.pd_client); pd_client.disable_default_operator(); cluster.run(); // To ensure the region has applied to its current term so that later `split` can success // without any retries. Then, `left_peer_3` will must be `1003`. let region = pd_client.get_region(b"k1").unwrap(); let peer_1 = find_peer(&region, 1).unwrap().to_owned(); cluster.must_transfer_leader(region.get_id(), peer_1); let k = b"k1_for_apply_to_current_term"; cluster.must_put(k, b"value"); must_get_equal(&cluster.get_engine(1), k, b"value"); cluster.must_split(&region, b"k2"); cluster.must_put(b"k1", b"v1"); cluster.must_put(b"k3", b"v3"); let left = pd_client.get_region(b"k1").unwrap(); let right = pd_client.get_region(b"k2").unwrap(); let left_peer_1 = find_peer(&left, 1).unwrap().to_owned(); cluster.must_transfer_leader(left.get_id(), left_peer_1.clone()); let left_peer_3 = find_peer(&left, 3).unwrap().to_owned(); assert_eq!(left_peer_3.get_id(), 1003); let schedule_merge_fp = "on_schedule_merge"; fail::cfg(schedule_merge_fp, "return()").unwrap(); cluster.must_try_merge(left.get_id(), right.get_id()); // Prevent peer 1003 to handle ready when it's leader let before_handle_raft_ready_1003 = "before_handle_raft_ready_1003"; fail::cfg(before_handle_raft_ready_1003, "pause").unwrap(); let epoch = cluster.get_region_epoch(left.get_id()); let mut transfer_leader_req = new_admin_request(left.get_id(), &epoch, new_transfer_leader_cmd(left_peer_3)); transfer_leader_req.mut_header().set_peer(left_peer_1); cluster .sim .rl() .async_command_on_node(1, transfer_leader_req, Callback::None) .unwrap(); fail::remove(schedule_merge_fp); pd_client.check_merged_timeout(left.get_id(), Duration::from_secs(5)); fail::remove(before_handle_raft_ready_1003); sleep_ms(100); cluster.must_put(b"k4", b"v4"); must_get_equal(&cluster.get_engine(3), b"k4", b"v4"); } #[test] fn test_node_merge_cascade_merge_with_apply_yield() { let mut cluster = new_node_cluster(0, 3); configure_for_merge(&mut cluster); let pd_client = Arc::clone(&cluster.pd_client); pd_client.disable_default_operator(); cluster.run(); let region = pd_client.get_region(b"k1").unwrap(); cluster.must_split(&region, b"k5"); let region = pd_client.get_region(b"k5").unwrap(); cluster.must_split(&region, b"k9"); for i in 0..10 { cluster.must_put(format!("k{}", i).as_bytes(), b"v1"); } let r1 = pd_client.get_region(b"k1").unwrap(); let r2 = pd_client.get_region(b"k5").unwrap(); let r3 = pd_client.get_region(b"k9").unwrap(); pd_client.must_merge(r2.get_id(), r1.get_id()); assert_eq!(r1.get_id(), 1000); let yield_apply_1000_fp = "yield_apply_1000"; fail::cfg(yield_apply_1000_fp, "80%3*return()").unwrap(); for i in 0..10 { cluster.must_put(format!("k{}", i).as_bytes(), b"v2"); } pd_client.must_merge(r3.get_id(), r1.get_id()); for i in 0..10 { cluster.must_put(format!("k{}", i).as_bytes(), b"v3"); } } // Test if the rollback merge proposal is proposed before the majority of peers want to rollback #[test] fn test_node_multiple_rollback_merge() { let mut cluster = new_node_cluster(0, 3); configure_for_merge(&mut cluster); cluster.cfg.raft_store.right_derive_when_split = true; cluster.cfg.raft_store.merge_check_tick_interval = 
ReadableDuration::millis(20); let pd_client = Arc::clone(&cluster.pd_client); pd_client.disable_default_operator(); cluster.run(); for i in 0..10 { cluster.must_put(format!("k{}", i).as_bytes(), b"v"); } let region = pd_client.get_region(b"k1").unwrap(); cluster.must_split(&region, b"k2"); let left = pd_client.get_region(b"k1").unwrap(); let right = pd_client.get_region(b"k2").unwrap(); let left_peer_1 = find_peer(&left, 1).unwrap().to_owned(); cluster.must_transfer_leader(left.get_id(), left_peer_1.clone()); assert_eq!(left_peer_1.get_id(), 1001); let on_schedule_merge_fp = "on_schedule_merge"; let on_check_merge_not_1001_fp = "on_check_merge_not_1001"; let mut right_peer_1_id = find_peer(&right, 1).unwrap().get_id(); for i in 0..3 { fail::cfg(on_schedule_merge_fp, "return()").unwrap(); cluster.must_try_merge(left.get_id(), right.get_id()); // Change the epoch of target region and the merge will fail pd_client.must_remove_peer(right.get_id(), new_peer(1, right_peer_1_id)); right_peer_1_id += 100; pd_client.must_add_peer(right.get_id(), new_peer(1, right_peer_1_id)); // Only the source leader is running `on_check_merge` fail::cfg(on_check_merge_not_1001_fp, "return()").unwrap(); fail::remove(on_schedule_merge_fp); // In previous implementation, rollback merge proposal can be proposed by leader itself // So wait for the leader propose rollback merge if possible sleep_ms(100); // Check if the source region is still in merging mode. let mut l_r = pd_client.get_region(b"k1").unwrap(); let req = new_request( l_r.get_id(), l_r.take_region_epoch(), vec![new_put_cf_cmd( "default", format!("k1{}", i).as_bytes(), b"vv", )], false, ); let resp = cluster .call_command_on_leader(req, Duration::from_millis(100)) .unwrap(); if !resp .get_header() .get_error() .get_message() .contains("merging mode") { panic!("resp {:?} does not contain merging mode error", resp); } fail::remove(on_check_merge_not_1001_fp); // Write data for waiting the merge to rollback easily cluster.must_put(format!("k1{}", i).as_bytes(), b"vv"); // Make sure source region is not merged to target region assert_eq!(pd_client.get_region(b"k1").unwrap().get_id(), left.get_id()); } } // In the previous implementation, the source peer will propose rollback merge // after the local target peer's epoch is larger than recorded previously. // But it's wrong. This test constructs a case that writing data to the source region // after merging. This operation can succeed in the previous implementation which // causes data loss. // In the current implementation, the rollback merge proposal can be proposed only when // the number of peers who want to rollback merge is greater than the majority of all // peers. If so, this merge is impossible to succeed. // PS: A peer who wants to rollback merge means its local target peer's epoch is larger // than recorded. 
#[test] fn test_node_merge_write_data_to_source_region_after_merging() { let mut cluster = new_node_cluster(0, 3); cluster.cfg.raft_store.merge_check_tick_interval = ReadableDuration::millis(100); // For snapshot after merging cluster.cfg.raft_store.merge_max_log_gap = 10; cluster.cfg.raft_store.raft_log_gc_count_limit = 12; cluster.cfg.raft_store.apply_batch_system.max_batch_size = Some(1); cluster.cfg.raft_store.apply_batch_system.pool_size = 2; let pd_client = Arc::clone(&cluster.pd_client); pd_client.disable_default_operator(); cluster.run(); cluster.must_put(b"k1", b"v1"); cluster.must_put(b"k2", b"v2"); let mut region = pd_client.get_region(b"k1").unwrap(); cluster.must_split(&region, b"k2"); let left = pd_client.get_region(b"k1").unwrap(); let right = pd_client.get_region(b"k2").unwrap(); let right_peer_2 = find_peer(&right, 2).cloned().unwrap(); assert_eq!(right_peer_2.get_id(), 2); // Make sure peer 2 finish split before pause cluster.must_put(b"k2pause", b"vpause"); must_get_equal(&cluster.get_engine(2), b"k2pause", b"vpause"); let on_handle_apply_2_fp = "on_handle_apply_2"; fail::cfg(on_handle_apply_2_fp, "pause").unwrap(); let right_peer_1 = find_peer(&right, 1).cloned().unwrap(); cluster.must_transfer_leader(right.get_id(), right_peer_1); let left_peer_3 = find_peer(&left, 3).cloned().unwrap(); cluster.must_transfer_leader(left.get_id(), left_peer_3.clone()); let schedule_merge_fp = "on_schedule_merge"; fail::cfg(schedule_merge_fp, "return()").unwrap(); cluster.must_try_merge(left.get_id(), right.get_id()); cluster.add_send_filter(IsolationFilterFactory::new(3)); fail::remove(schedule_merge_fp); pd_client.check_merged_timeout(left.get_id(), Duration::from_secs(5)); region = pd_client.get_region(b"k1").unwrap(); cluster.must_split(&region, b"k2"); let state1 = cluster.apply_state(region.get_id(), 1); for i in 0..15 { cluster.must_put(format!("k2{}", i).as_bytes(), b"v2"); } cluster.wait_log_truncated(region.get_id(), 1, state1.get_applied_index()); // Ignore this msg to make left region exist. let on_has_merge_target_fp = "on_has_merge_target"; fail::cfg(on_has_merge_target_fp, "return").unwrap(); cluster.clear_send_filters(); // On store 3, now the right region is updated by snapshot not applying logs // so the left region still exist. // Wait for left region to rollback merge (in previous wrong implementation) sleep_ms(200); // Write data to left region let mut new_left = left; let mut epoch = new_left.take_region_epoch(); // prepareMerge => conf_ver + 1, version + 1 // rollbackMerge => version + 1 epoch.set_conf_ver(epoch.get_conf_ver() + 1); epoch.set_version(epoch.get_version() + 2); let mut req = new_request( new_left.get_id(), epoch, vec![new_put_cf_cmd("default", b"k11", b"v11")], false, ); req.mut_header().set_peer(left_peer_3); if let Ok(()) = cluster .sim .rl() .async_command_on_node(3, req, Callback::None) { sleep_ms(200); // The write must not succeed must_get_none(&cluster.get_engine(2), b"k11"); must_get_none(&cluster.get_engine(3), b"k11"); } fail::remove(on_handle_apply_2_fp); } /// In previous implementation, destroying its source peer(s) and applying snapshot is not **atomic**. /// It may break the rule of our merging process. /// /// A tikv crash after its source peers have destroyed but this target peer does not become to /// `Applying` state which means it will not apply snapshot after this tikv restarts. 
/// After this tikv restarts, a new leader may send logs to this target peer, then the panic may happen /// because it can not find its source peers when applying `CommitMerge` log. /// /// This test is to reproduce above situation. #[test] fn test_node_merge_crash_before_snapshot_then_catch_up_logs() { let mut cluster = new_node_cluster(0, 3); cluster.cfg.raft_store.merge_max_log_gap = 10; cluster.cfg.raft_store.raft_log_gc_count_limit = 11; cluster.cfg.raft_store.raft_log_gc_tick_interval = ReadableDuration::millis(50); // Make merge check resume quickly. cluster.cfg.raft_store.raft_base_tick_interval = ReadableDuration::millis(10); cluster.cfg.raft_store.raft_election_timeout_ticks = 10; // election timeout must be greater than lease cluster.cfg.raft_store.raft_store_max_leader_lease = ReadableDuration::millis(90); cluster.cfg.raft_store.merge_check_tick_interval = ReadableDuration::millis(100); cluster.cfg.raft_store.peer_stale_state_check_interval = ReadableDuration::millis(500); let pd_client = Arc::clone(&cluster.pd_client); pd_client.disable_default_operator(); let on_raft_gc_log_tick_fp = "on_raft_gc_log_tick"; fail::cfg(on_raft_gc_log_tick_fp, "return()").unwrap(); cluster.run(); let mut region = pd_client.get_region(b"k1").unwrap(); cluster.must_split(&region, b"k2"); let left = pd_client.get_region(b"k1").unwrap(); let right = pd_client.get_region(b"k2").unwrap(); let left_on_store1 = find_peer(&left, 1).unwrap().to_owned(); cluster.must_transfer_leader(left.get_id(), left_on_store1); let right_on_store1 = find_peer(&right, 1).unwrap().to_owned(); cluster.must_transfer_leader(right.get_id(), right_on_store1); cluster.must_put(b"k1", b"v1"); cluster.add_send_filter(IsolationFilterFactory::new(3)); pd_client.must_merge(left.get_id(), right.get_id()); region = pd_client.get_region(b"k1").unwrap(); // Write some logs and the logs' number is greater than `raft_log_gc_count_limit` // for latter log compaction for i in 2..15 { cluster.must_put(format!("k{}", i).as_bytes(), b"v"); } // Aim at making peer 2 only know the compact log but do not know it is committed let condition = Arc::new(AtomicBool::new(false)); let recv_filter = Box::new( RegionPacketFilter::new(region.get_id(), 2) .direction(Direction::Recv) .when(condition.clone()) .set_msg_callback(Arc::new(move |msg: &RaftMessage| { if !condition.load(Ordering::Acquire) && msg.get_message().get_msg_type() == MessageType::MsgAppend && !msg.get_message().get_entries().is_empty() { condition.store(true, Ordering::Release); } })), ); cluster.sim.wl().add_recv_filter(2, recv_filter); let state1 = cluster.truncated_state(region.get_id(), 1); // Remove log compaction failpoint fail::remove(on_raft_gc_log_tick_fp); // Wait to trigger compact raft log cluster.wait_log_truncated(region.get_id(), 1, state1.get_index() + 1); let peer_on_store3 = find_peer(&region, 3).unwrap().to_owned(); assert_eq!(peer_on_store3.get_id(), 3); // Make peer 3 do not handle snapshot ready // In previous implementation, destroying its source peer and applying snapshot is not atomic. // So making its source peer be destroyed and do not apply snapshot to reproduce the problem let before_handle_snapshot_ready_3_fp = "before_handle_snapshot_ready_3"; fail::cfg(before_handle_snapshot_ready_3_fp, "return()").unwrap(); cluster.clear_send_filters(); // Peer 1 will send snapshot to peer 3 // Source peer sends msg to others to get target region info until the election timeout. 
// The max election timeout is 2 * 10 * 10 = 200ms let election_timeout = 2 * cluster.cfg.raft_store.raft_base_tick_interval.as_millis() * cluster.cfg.raft_store.raft_election_timeout_ticks as u64; sleep_ms(election_timeout + 100); cluster.stop_node(1); cluster.stop_node(3); cluster.sim.wl().clear_recv_filters(2); fail::remove(before_handle_snapshot_ready_3_fp); cluster.run_node(3).unwrap(); // Peer 2 will become leader and it don't know the compact log is committed. // So it will send logs not snapshot to peer 3 for i in 20..30 { cluster.must_put(format!("k{}", i).as_bytes(), b"v"); } must_get_equal(&cluster.get_engine(3), b"k29", b"v"); } /// Test if snapshot is applying correctly when crash happens. #[test] fn test_node_merge_crash_when_snapshot() { let mut cluster = new_node_cluster(0, 3); cluster.cfg.raft_store.merge_max_log_gap = 10; cluster.cfg.raft_store.raft_log_gc_count_limit = 11; cluster.cfg.raft_store.raft_log_gc_tick_interval = ReadableDuration::millis(50); // Make merge check resume quickly. cluster.cfg.raft_store.raft_base_tick_interval = ReadableDuration::millis(10); cluster.cfg.raft_store.raft_election_timeout_ticks = 10; // election timeout must be greater than lease cluster.cfg.raft_store.raft_store_max_leader_lease = ReadableDuration::millis(90); cluster.cfg.raft_store.merge_check_tick_interval = ReadableDuration::millis(100); cluster.cfg.raft_store.peer_stale_state_check_interval = ReadableDuration::millis(500); let pd_client = Arc::clone(&cluster.pd_client); pd_client.disable_default_operator(); let on_raft_gc_log_tick_fp = "on_raft_gc_log_tick"; fail::cfg(on_raft_gc_log_tick_fp, "return()").unwrap(); cluster.run(); let mut region = pd_client.get_region(b"k1").unwrap(); cluster.must_split(&region, b"k2"); region = pd_client.get_region(b"k2").unwrap(); cluster.must_split(&region, b"k3"); region = pd_client.get_region(b"k3").unwrap(); cluster.must_split(&region, b"k4"); region = pd_client.get_region(b"k4").unwrap(); cluster.must_split(&region, b"k5"); let r1 = pd_client.get_region(b"k1").unwrap(); let r1_on_store1 = find_peer(&r1, 1).unwrap().to_owned(); cluster.must_transfer_leader(r1.get_id(), r1_on_store1); let r2 = pd_client.get_region(b"k2").unwrap(); let r2_on_store1 = find_peer(&r2, 1).unwrap().to_owned(); cluster.must_transfer_leader(r2.get_id(), r2_on_store1); let r3 = pd_client.get_region(b"k3").unwrap(); let r3_on_store1 = find_peer(&r3, 1).unwrap().to_owned(); cluster.must_transfer_leader(r3.get_id(), r3_on_store1); let r4 = pd_client.get_region(b"k4").unwrap(); let r4_on_store1 = find_peer(&r4, 1).unwrap().to_owned(); cluster.must_transfer_leader(r4.get_id(), r4_on_store1); let r5 = pd_client.get_region(b"k5").unwrap(); let r5_on_store1 = find_peer(&r5, 1).unwrap().to_owned(); cluster.must_transfer_leader(r5.get_id(), r5_on_store1); for i in 1..5 { cluster.must_put(format!("k{}", i).as_bytes(), b"v"); must_get_equal(&cluster.get_engine(3), format!("k{}", i).as_bytes(), b"v"); } cluster.add_send_filter(IsolationFilterFactory::new(3)); pd_client.must_merge(r2.get_id(), r3.get_id()); pd_client.must_merge(r4.get_id(), r3.get_id()); pd_client.must_merge(r1.get_id(), r3.get_id()); pd_client.must_merge(r5.get_id(), r3.get_id()); for i in 1..5 { for j in 1..20 { cluster.must_put(format!("k{}{}", i, j).as_bytes(), b"vvv"); } } region = pd_client.get_region(b"k1").unwrap(); let state1 = cluster.truncated_state(region.get_id(), 1); // Remove log compaction failpoint fail::remove(on_raft_gc_log_tick_fp); // Wait to trigger compact raft log 
cluster.wait_log_truncated(region.get_id(), 1, state1.get_index() + 1); let on_region_worker_apply_fp = "on_region_worker_apply"; fail::cfg(on_region_worker_apply_fp, "return()").unwrap(); let on_region_worker_destroy_fp = "on_region_worker_destroy"; fail::cfg(on_region_worker_destroy_fp, "return()").unwrap(); cluster.clear_send_filters(); let timer = Instant::now(); loop { let local_state = cluster.region_local_state(region.get_id(), 3); if local_state.get_state() == PeerState::Applying { break; } if timer.saturating_elapsed() > Duration::from_secs(1) { panic!("not become applying state after 1 seconds."); } sleep_ms(10); } cluster.stop_node(3); fail::remove(on_region_worker_apply_fp); fail::remove(on_region_worker_destroy_fp); cluster.run_node(3).unwrap(); for i in 1..5 { for j in 1..20 { must_get_equal( &cluster.get_engine(3), format!("k{}{}", i, j).as_bytes(), b"vvv", ); } } } #[test] fn test_prewrite_before_max_ts_is_synced() { let mut cluster = new_server_cluster(0, 3); configure_for_merge(&mut cluster); cluster.run(); // Transfer leader to node 1 first to ensure all operations happen on node 1 cluster.must_transfer_leader(1, new_peer(1, 1)); cluster.must_put(b"k1", b"v1"); cluster.must_put(b"k3", b"v3"); let region = cluster.get_region(b"k1"); cluster.must_split(&region, b"k2"); let left = cluster.get_region(b"k1"); let right = cluster.get_region(b"k3"); let addr = cluster.sim.rl().get_addr(1); let env = Arc::new(Environment::new(1)); let channel = ChannelBuilder::new(env).connect(&addr); let client = TikvClient::new(channel); let do_prewrite = |cluster: &mut Cluster<ServerCluster>| { let region_id = right.get_id(); let leader = cluster.leader_of_region(region_id).unwrap(); let epoch = cluster.get_region_epoch(region_id); let mut ctx = Context::default(); ctx.set_region_id(region_id); ctx.set_peer(leader); ctx.set_region_epoch(epoch); let mut req = PrewriteRequest::default(); req.set_context(ctx); req.set_primary_lock(b"key".to_vec()); let mut mutation = Mutation::default(); mutation.set_op(Op::Put); mutation.set_key(b"key".to_vec()); mutation.set_value(b"value".to_vec()); req.mut_mutations().push(mutation); req.set_start_version(100); req.set_lock_ttl(20000); req.set_use_async_commit(true); client.kv_prewrite(&req).unwrap() }; fail::cfg("test_raftstore_get_tso", "return(50)").unwrap(); cluster.pd_client.must_merge(left.get_id(), right.get_id()); let resp = do_prewrite(&mut cluster); assert!(resp.get_region_error().has_max_timestamp_not_synced()); fail::remove("test_raftstore_get_tso"); thread::sleep(Duration::from_millis(200)); let resp = do_prewrite(&mut cluster); assert!(!resp.get_region_error().has_max_timestamp_not_synced()); }<|fim▁end|>
<|file_name|>tlsutility.hpp<|end_file_name|><|fim▁begin|>/****************************************************************************** * Icinga 2 * * Copyright (C) 2012-2016 Icinga Development Team (https://www.icinga.org/) * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of the GNU General Public License * * as published by the Free Software Foundation; either version 2 * * of the License, or (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the Free Software Foundation * * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. * ******************************************************************************/<|fim▁hole|>#define TLSUTILITY_H #include "base/i2-base.hpp" #include "base/object.hpp" #include "base/string.hpp" #include <openssl/ssl.h> #include <openssl/bio.h> #include <openssl/err.h> #include <openssl/comp.h> #include <openssl/sha.h> #include <openssl/x509v3.h> #include <openssl/evp.h> #include <openssl/rand.h> #include <boost/smart_ptr/shared_ptr.hpp> #include <boost/exception/info.hpp> namespace icinga { void I2_BASE_API InitializeOpenSSL(void); boost::shared_ptr<SSL_CTX> I2_BASE_API MakeSSLContext(const String& pubkey = String(), const String& privkey = String(), const String& cakey = String()); void I2_BASE_API AddCRLToSSLContext(const boost::shared_ptr<SSL_CTX>& context, const String& crlPath); void I2_BASE_API SetCipherListToSSLContext(const boost::shared_ptr<SSL_CTX>& context, const String& cipherList); void I2_BASE_API SetTlsProtocolminToSSLContext(const boost::shared_ptr<SSL_CTX>& context, const String& tlsProtocolmin); String I2_BASE_API GetCertificateCN(const boost::shared_ptr<X509>& certificate); boost::shared_ptr<X509> I2_BASE_API GetX509Certificate(const String& pemfile); int I2_BASE_API MakeX509CSR(const String& cn, const String& keyfile, const String& csrfile = String(), const String& certfile = String(), bool ca = false); boost::shared_ptr<X509> I2_BASE_API CreateCert(EVP_PKEY *pubkey, X509_NAME *subject, X509_NAME *issuer, EVP_PKEY *cakey, bool ca); String I2_BASE_API GetIcingaCADir(void); String I2_BASE_API CertificateToString(const boost::shared_ptr<X509>& cert); boost::shared_ptr<X509> I2_BASE_API CreateCertIcingaCA(EVP_PKEY *pubkey, X509_NAME *subject); String I2_BASE_API PBKDF2_SHA1(const String& password, const String& salt, int iterations); String I2_BASE_API SHA1(const String& s); String I2_BASE_API SHA256(const String& s); String I2_BASE_API RandomString(int length); class I2_BASE_API openssl_error : virtual public std::exception, virtual public boost::exception { }; struct errinfo_openssl_error_; typedef boost::error_info<struct errinfo_openssl_error_, unsigned long> errinfo_openssl_error; inline std::string to_string(const errinfo_openssl_error& e) { std::ostringstream tmp; int code = e.value(); char errbuf[120]; const char *message = ERR_error_string(code, errbuf); if (message == NULL) message = "Unknown error."; tmp << code << ", \"" << message << "\""; return "[errinfo_openssl_error]" + tmp.str() + "\n"; } } #endif /* TLSUTILITY_H */<|fim▁end|>
#ifndef TLSUTILITY_H
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /*! Syntax extension to generate FourCCs. Once loaded, fourcc!() is called with a single 4-character string, and an optional ident that is either `big`, `little`, or `target`. The ident represents endianness, and specifies in which direction the characters should be read. If the ident is omitted, it is assumed to be `big`, i.e. left-to-right order. It returns a u32. # Examples To load the extension and use it: ```rust,ignore #[phase(syntax)] extern crate fourcc; fn main() { let val = fourcc!("\xC0\xFF\xEE!"); assert_eq!(val, 0xC0FFEE21u32); let little_val = fourcc!("foo ", little); assert_eq!(little_val, 0x21EEFFC0u32); } ``` # References * [Wikipedia: FourCC](http://en.wikipedia.org/wiki/FourCC) */ #![crate_id = "fourcc#0.10"] #![crate_type = "rlib"] #![crate_type = "dylib"] #![license = "MIT/ASL2"] #![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "http://www.rust-lang.org/favicon.ico", html_root_url = "http://static.rust-lang.org/doc/master")] #![deny(deprecated_owned_vector)] #![feature(macro_registrar, managed_boxes)] extern crate syntax; use syntax::ast; use syntax::ast::Name; use syntax::attr::contains; use syntax::codemap::{Span, mk_sp}; use syntax::ext::base; use syntax::ext::base::{SyntaxExtension, BasicMacroExpander, NormalTT, ExtCtxt, MRExpr}; use syntax::ext::build::AstBuilder; use syntax::parse; use syntax::parse::token; use syntax::parse::token::InternedString; #[macro_registrar] pub fn macro_registrar(register: |Name, SyntaxExtension|) { register(token::intern("fourcc"), NormalTT(~BasicMacroExpander { expander: expand_syntax_ext, span: None, }, None)); } pub fn expand_syntax_ext(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) -> base::MacResult { let (expr, endian) = parse_tts(cx, tts); let little = match endian { None => false, Some(Ident{ident, span}) => match token::get_ident(ident).get() { "little" => true, "big" => false, "target" => target_endian_little(cx, sp), _ => { cx.span_err(span, "invalid endian directive in fourcc!"); target_endian_little(cx, sp) } } }; <|fim▁hole|> // expression is a literal ast::ExprLit(lit) => match lit.node { // string literal ast::LitStr(ref s, _) => { if s.get().char_len() != 4 { cx.span_err(expr.span, "string literal with len != 4 in fourcc!"); } s } _ => { cx.span_err(expr.span, "unsupported literal in fourcc!"); return MRExpr(cx.expr_lit(sp, ast::LitUint(0u64, ast::TyU32))); } }, _ => { cx.span_err(expr.span, "non-literal in fourcc!"); return MRExpr(cx.expr_lit(sp, ast::LitUint(0u64, ast::TyU32))); } }; let mut val = 0u32; for codepoint in s.get().chars().take(4) { let byte = if codepoint as u32 > 0xFF { cx.span_err(expr.span, "fourcc! 
literal character out of range 0-255"); 0u8 } else { codepoint as u8 }; val = if little { (val >> 8) | ((byte as u32) << 24) } else { (val << 8) | (byte as u32) }; } let e = cx.expr_lit(sp, ast::LitUint(val as u64, ast::TyU32)); MRExpr(e) } struct Ident { ident: ast::Ident, span: Span } fn parse_tts(cx: &ExtCtxt, tts: &[ast::TokenTree]) -> (@ast::Expr, Option<Ident>) { let p = &mut parse::new_parser_from_tts(cx.parse_sess(), cx.cfg(), tts.iter() .map(|x| (*x).clone()) .collect()); let ex = p.parse_expr(); let id = if p.token == token::EOF { None } else { p.expect(&token::COMMA); let lo = p.span.lo; let ident = p.parse_ident(); let hi = p.last_span.hi; Some(Ident{ident: ident, span: mk_sp(lo, hi)}) }; if p.token != token::EOF { p.unexpected(); } (ex, id) } fn target_endian_little(cx: &ExtCtxt, sp: Span) -> bool { let meta = cx.meta_name_value(sp, InternedString::new("target_endian"), ast::LitStr(InternedString::new("little"), ast::CookedStr)); contains(cx.cfg().as_slice(), meta) } // FIXME (10872): This is required to prevent an LLVM assert on Windows #[test] fn dummy_test() { }<|fim▁end|>
let s = match expr.node {
<|file_name|>test_broker.py<|end_file_name|><|fim▁begin|>#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from txamqp.client import Closed
from txamqp.queue import Empty
from txamqp.content import Content
from txamqp.testlib import TestBase, supportedBrokers, QPID, OPENAMQ

from twisted.internet.defer import inlineCallbacks

class ASaslPlainAuthenticationTest(TestBase):
    """Test for SASL PLAIN authentication Broker functionality"""

    @inlineCallbacks
    def authenticate(self,client,user,password):
        yield client.authenticate(user, password,mechanism='PLAIN')

    @inlineCallbacks
    def test_sasl_plain(self):
        channel = yield self.client.channel(200)
        yield channel.channel_open()
        yield channel.channel_close()

class ASaslAmqPlainAuthenticationTest(TestBase):
    """Test for SASL AMQPLAIN authentication Broker functionality"""

    @inlineCallbacks
    def authenticate(self,client,user,password):
        yield client.authenticate(user, password,mechanism='AMQPLAIN')

    @inlineCallbacks
    def test_sasl_amq_plain(self):
        channel = yield self.client.channel(200)
        yield channel.channel_open()
        yield channel.channel_close()

<|fim▁hole|>
    @inlineCallbacks
    def test_amqp_basic_13(self):
        """
        First, this test tries to receive a message with a no-ack
        consumer. Second, this test tries to explicitly receive and
        acknowledge a message with an acknowledging consumer.
""" ch = self.channel yield self.queue_declare(ch, queue = "myqueue") # No ack consumer ctag = (yield ch.basic_consume(queue = "myqueue", no_ack = True)).consumer_tag body = "test no-ack" ch.basic_publish(routing_key = "myqueue", content = Content(body)) msg = yield ((yield self.client.queue(ctag)).get(timeout = 5)) self.assert_(msg.content.body == body) # Acknowleding consumer yield self.queue_declare(ch, queue = "otherqueue") ctag = (yield ch.basic_consume(queue = "otherqueue", no_ack = False)).consumer_tag body = "test ack" ch.basic_publish(routing_key = "otherqueue", content = Content(body)) msg = yield ((yield self.client.queue(ctag)).get(timeout = 5)) ch.basic_ack(delivery_tag = msg.delivery_tag) self.assert_(msg.content.body == body) @inlineCallbacks def test_basic_delivery_immediate(self): """ Test basic message delivery where consume is issued before publish """ channel = self.channel yield self.exchange_declare(channel, exchange="test-exchange", type="direct") yield self.queue_declare(channel, queue="test-queue") yield channel.queue_bind(queue="test-queue", exchange="test-exchange", routing_key="key") reply = yield channel.basic_consume(queue="test-queue", no_ack=True) queue = yield self.client.queue(reply.consumer_tag) body = "Immediate Delivery" channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content(body), immediate=True) msg = yield queue.get(timeout=5) self.assert_(msg.content.body == body) # TODO: Ensure we fail if immediate=True and there's no consumer. @inlineCallbacks def test_basic_delivery_queued(self): """ Test basic message delivery where publish is issued before consume (i.e. requires queueing of the message) """ channel = self.channel yield self.exchange_declare(channel, exchange="test-exchange", type="direct") yield self.queue_declare(channel, queue="test-queue") yield channel.queue_bind(queue="test-queue", exchange="test-exchange", routing_key="key") body = "Queued Delivery" channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content(body)) reply = yield channel.basic_consume(queue="test-queue", no_ack=True) queue = yield self.client.queue(reply.consumer_tag) msg = yield queue.get(timeout=5) self.assert_(msg.content.body == body) @inlineCallbacks def test_invalid_channel(self): channel = yield self.client.channel(200) try: yield channel.queue_declare(exclusive=True) self.fail("Expected error on queue_declare for invalid channel") except Closed, e: self.assertConnectionException(504, e.args[0]) @inlineCallbacks def test_closed_channel(self): channel = yield self.client.channel(200) yield channel.channel_open() yield channel.channel_close() try: yield channel.queue_declare(exclusive=True) self.fail("Expected error on queue_declare for closed channel") except Closed, e: self.assertConnectionException(504, e.args[0]) @supportedBrokers(QPID, OPENAMQ) @inlineCallbacks def test_channel_flow(self): channel = self.channel yield channel.queue_declare(queue="flow_test_queue", exclusive=True) yield channel.basic_consume(consumer_tag="my-tag", queue="flow_test_queue") incoming = yield self.client.queue("my-tag") yield channel.channel_flow(active=False) channel.basic_publish(routing_key="flow_test_queue", content=Content("abcdefghijklmnopqrstuvwxyz")) try: yield incoming.get(timeout=1) self.fail("Received message when flow turned off.") except Empty: None yield channel.channel_flow(active=True) msg = yield incoming.get(timeout=1) self.assertEqual("abcdefghijklmnopqrstuvwxyz", msg.content.body)<|fim▁end|>
class BrokerTests(TestBase): """Tests for basic Broker functionality"""
<|file_name|>method-self-arg-aux1.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Test method calls with self as an argument (cross-crate) #![allow(unknown_features)] #![feature(box_syntax)] // aux-build:method_self_arg1.rs extern crate method_self_arg1; use method_self_arg1::Foo; fn main() { let x = Foo; // Test external call. Foo::bar(&x);<|fim▁hole|> Foo::qux(box x); x.foo(&x); assert!(method_self_arg1::get_count() == 2u64*3*3*3*5*5*5*7*7*7); }<|fim▁end|>
Foo::baz(x);
<|file_name|>graph.rs<|end_file_name|><|fim▁begin|>//! A container for audio devices in an acyclic graph.
//!
//! A graph can be used when many audio devices need to connect in complex
//! topologies. It can connect each output channel of a device to any input
//! channel, provided that connection does not create a cycle.
//!
//! A graph is initialized by adding each device as a node in the graph, and
//! then specifying the edges between devices. The graph will automatically
//! process the devices in order of their dependencies.
//!
//! # Example
//!
//! The following example creates a graph with two different branches into
//! a stereo output. It feeds the microphone to the left channel, and
//! a low-passed oscillator into the right channel.
//!
//! ```no_run
//! use oxcable::filters::first_order::{Filter, LowPass};
//! use oxcable::graph::{DeviceGraph, Tick};
//! use oxcable::io::audio::AudioEngine;
//! use oxcable::oscillator::*;
//!
//! let engine = AudioEngine::with_buffer_size(256).unwrap();
//! let mut graph = DeviceGraph::new();
//!
//! // Add nodes to graph
//! let microphone = graph.add_node(engine.default_input(1).unwrap());
//! let oscillator = graph.add_node(Oscillator::new(Sine).freq(440.0));
//! let filter = graph.add_node(Filter::new(LowPass(8000f32), 1));
//! let speaker = graph.add_node(engine.default_output(2).unwrap());
//!
//! // Connect devices together
//! graph.add_edge(microphone, 0, speaker, 0);
//! graph.add_edge(oscillator, 0, filter, 0);
//! graph.add_edge(filter, 0, speaker, 1);
//!
//! // Play audio ad nauseam.
//! graph.tick_forever();
//! ```

use std::collections::VecDeque;

use error::{Error, Result};
use types::{AudioDevice, Sample, Time};
pub use tick::Tick;

/// An acyclic graph for audio devices.
pub struct DeviceGraph {
    nodes: Vec<AudioNode>, // the actual nodes
    topology: Vec<usize>,  // the order to tick the nodes
    bus: Vec<Sample>,      // the audio bus to write samples to
    time: Time             // the next timestep
}

impl DeviceGraph {
    /// Creates an empty graph.
    pub fn new() -> Self {
        DeviceGraph {
            nodes: Vec::new(),
            topology: Vec::new(),
            bus: Vec::new(),
            time: 0
        }
    }

    /// Adds a new device into the graph, with no connections. Returns
    /// an identifier that refers back to this device.
    pub fn add_node<D>(&mut self, device: D) -> AudioNodeIdx
            where D: 'static+AudioDevice {
        let node = AudioNode::new(device, &mut self.bus);
        let idx = self.nodes.len();
        self.nodes.push(node);
        self.topology.push(idx);
        AudioNodeIdx(idx)
    }

    /// Connects two devices in the graph.
    ///
    /// * `src` and `dest` are identifiers for the actual devices to connect.
    /// * `src_ch` and `dest_ch` are the channel indices of the two devices.
    ///
    /// If invalid indices are provided, or if the specified edge would create
    /// a cycle in the graph, an Err is returned and no changes to the graph
    /// are made.
pub fn add_edge(&mut self, src: AudioNodeIdx, src_ch: usize, dest: AudioNodeIdx, dest_ch: usize) -> Result<()> { // Check device indices let AudioNodeIdx(src_i) = src; let AudioNodeIdx(dest_i) = dest; if src_i >= self.nodes.len() { return Err(Error::OutOfRange("src")); } else if dest_i >= self.nodes.len() { return Err(Error::OutOfRange("dest")); } // Check channels if self.nodes[src_i].device.num_outputs() <= src_ch { return Err(Error::OutOfRange("src_ch")); } if self.nodes[dest_i].device.num_inputs() <= dest_ch { return Err(Error::OutOfRange("dest_ch")); } while self.nodes[dest_i].inputs.len() < dest_ch { self.nodes[dest_i].inputs.push(None); } // Set input let (start,_) = self.nodes[src_i].outputs; self.nodes[dest_i].inputs[dest_ch] = Some(start+src_ch); self.topological_sort(dest_i, dest_ch) } /// Determines the topology of our device graph. If the graph has a cycle, /// then we remove the last edge. Otherwise, we set self.topology to /// a topologically sorted order. fn topological_sort(&mut self, dest_i: usize, dest_ch: usize) -> Result<()> { // Intialize our set of input edges, and our set of edgeless nodes let mut topology = Vec::new(); let mut inputs: Vec<Vec<_>> = self.nodes.iter().map( |node| node.inputs.iter().filter_map(|&o| o).collect() ).collect(); let mut no_inputs: VecDeque<_> = inputs.iter().enumerate().filter_map( |(i, ins)| if ins.len() == 0 { Some(i) } else { None } ).collect(); // While there are nodes with no input, we choose one, add it as the // next node in our topology, and remove all edges from that node. Any // nodes that lose their final edge are added to the edgeless set. loop { match no_inputs.pop_front() { Some(i) => { topology.push(i); let (out_start, out_end) = self.nodes[i].outputs; for out in out_start..out_end { for (j, ins) in inputs.iter_mut().enumerate() { let mut idx = None; for k in 0..ins.len() { if ins[k] == out { idx = Some(k); break; } } match idx { Some(k) => {<|fim▁hole|> no_inputs.push_back(j); } }, None => () } } } }, None => break } } if topology.len() == self.nodes.len() { self.topology = topology; Ok(()) } else { self.nodes[dest_i].inputs[dest_ch] = None; Err(Error::CreatesCycle) } } } impl Tick for DeviceGraph { fn tick(&mut self) { for &i in self.topology.iter() { self.nodes[i].tick(self.time, &mut self.bus); } self.time += 1; } } /// An identifier used to refer back to a node in the graph. #[derive(Copy, Clone, Debug)] pub struct AudioNodeIdx(usize); /// A wrapper for a node in the graph. /// /// Management of indices in the bus is handled in the graph itself. struct AudioNode { device: Box<AudioDevice>, // wraps the device inputs: Vec<Option<usize>>, // bus indices of the inputs input_buf: Vec<Sample>, // an allocated buffer for containing inputs outputs: (usize, usize) // the range of outputs in the bus } impl AudioNode { /// Wraps the device in a new node fn new<D>(device: D, bus: &mut Vec<Sample>) -> AudioNode where D: 'static+AudioDevice { let num_in = device.num_inputs(); let num_out = device.num_outputs(); let start = bus.len(); for _ in 0..num_out { bus.push(0.0); } let end = bus.len(); AudioNode { device: Box::new(device), inputs: vec![None; num_in], input_buf: vec![0.0; num_in], outputs: (start, end) } } /// Extracts the inputs out of the bus, tick the device and place the outputs /// back into the bus. 
fn tick(&mut self, t: Time, bus: &mut[Sample]) { for (i, ch) in self.inputs.iter().enumerate() { self.input_buf[i] = ch.map_or(0.0, |j| bus[j]); } let (start, end) = self.outputs; self.device.tick(t, &self.input_buf, &mut bus[start..end]); } } #[cfg(test)] mod test { use testing::MockAudioDevice; use super::{DeviceGraph, Tick}; #[test] fn test_empty_graph() { DeviceGraph::new().tick(); } #[test] fn test_one_node() { let mut mock = MockAudioDevice::new("mock", 1, 1); mock.will_tick(&[0.0], &[1.0]); let mut graph = DeviceGraph::new(); graph.add_node(mock); graph.tick(); } #[test] fn test_disconnected() { let mut mock1 = MockAudioDevice::new("mock1", 1, 1); let mut mock2 = MockAudioDevice::new("mock2", 1, 1); mock1.will_tick(&[0.0], &[1.0]); mock2.will_tick(&[0.0], &[2.0]); let mut graph = DeviceGraph::new(); graph.add_node(mock1); graph.add_node(mock2); graph.tick(); } #[test] fn test_linear() { let mut mock1 = MockAudioDevice::new("mock1", 0, 1); let mut mock2 = MockAudioDevice::new("mock2", 1, 0); mock1.will_tick(&[], &[1.0]); mock2.will_tick(&[1.0], &[]); let mut graph = DeviceGraph::new(); let mock1 = graph.add_node(mock1); let mock2 = graph.add_node(mock2); graph.add_edge(mock1, 0, mock2, 0).unwrap(); graph.tick(); } #[test] fn test_complex() { let mut mock1 = MockAudioDevice::new("mock1", 1, 1); let mut mock2 = MockAudioDevice::new("mock2", 1, 1); let mut mock3 = MockAudioDevice::new("mock3", 2, 1); let mut mock4 = MockAudioDevice::new("mock4", 1, 1); let mut mock5 = MockAudioDevice::new("mock5", 1, 1); mock1.will_tick(&[0.0], &[1.0]); mock2.will_tick(&[4.0], &[2.0]); mock3.will_tick(&[2.0, 4.0], &[3.0]); mock4.will_tick(&[1.0], &[4.0]); mock5.will_tick(&[0.0], &[5.0]); let mut graph = DeviceGraph::new(); let mock1 = graph.add_node(mock1); let mock2 = graph.add_node(mock2); let mock3 = graph.add_node(mock3); let mock4 = graph.add_node(mock4); let _mock5 = graph.add_node(mock5); graph.add_edge(mock1, 0, mock4, 0).unwrap(); graph.add_edge(mock4, 0, mock2, 0).unwrap(); graph.add_edge(mock2, 0, mock3, 0).unwrap(); graph.add_edge(mock4, 0, mock3, 1).unwrap(); graph.tick(); } #[test] #[should_panic] fn test_direct_cycle() { let mock1 = MockAudioDevice::new("mock1", 1, 1); let mock2 = MockAudioDevice::new("mock2", 1, 1); let mut graph = DeviceGraph::new(); let mock1 = graph.add_node(mock1); let mock2 = graph.add_node(mock2); graph.add_edge(mock1, 0, mock2, 0).unwrap(); graph.add_edge(mock2, 0, mock1, 0).unwrap(); } #[test] #[should_panic] fn test_indirect_cycle() { let mock1 = MockAudioDevice::new("mock1", 1, 1); let mock2 = MockAudioDevice::new("mock2", 1, 1); let mock3 = MockAudioDevice::new("mock3", 1, 1); let mut graph = DeviceGraph::new(); let mock1 = graph.add_node(mock1); let mock2 = graph.add_node(mock2); let mock3 = graph.add_node(mock3); graph.add_edge(mock1, 0, mock2, 0).unwrap(); graph.add_edge(mock2, 0, mock3, 0).unwrap(); graph.add_edge(mock3, 0, mock1, 0).unwrap(); } }<|fim▁end|>
ins.swap_remove(k); if ins.len() == 0 {
<|file_name|>foundation.d.ts<|end_file_name|><|fim▁begin|>/** * Copyright 2017 Google Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { MDCFoundation } from 'material__base'; import { cssClasses, strings } from './constants'; import { MSDDialogAdapter } from './adapter'; export class MDCDialogFoundation extends MDCFoundation<MSDDialogAdapter> { static readonly cssClasses: cssClasses; static readonly strings: strings; static readonly defaultAdapter: MSDDialogAdapter; open(): void; close(): void; isOpen(): boolean; accept(shouldNotify: boolean): void; cancel(shouldNotify: boolean): void; } <|fim▁hole|><|fim▁end|>
export default MDCDialogFoundation;
<|file_name|>helpers.go<|end_file_name|><|fim▁begin|>package travel import ( "os" "github.com/akerl/speculate/v2/creds" ) func clearEnvironment() error { for varName := range creds.Translations["envvar"] { logger.InfoMsgf("Unsetting env var: %s", varName) err := os.Unsetenv(varName) if err != nil { return err } } return nil } func stringInSlice(list []string, key string) bool { for _, item := range list { if item == key { return true } } return false } func sliceUnion(a []string, b []string) []string { var res []string for _, item := range a { if stringInSlice(b, item) { res = append(res, item) } } return res } type attrFunc func(Path) string func uniquePathAttributes(paths []Path, af attrFunc) []string { tmpMap := map[string]bool{} for _, item := range paths { attr := af(item) tmpMap[attr] = true } tmpList := []string{} for item := range tmpMap { tmpList = append(tmpList, item) } return tmpList }<|fim▁hole|> func filterPathsByAttribute(paths []Path, match string, af attrFunc) []Path { filteredPaths := []Path{} for _, item := range paths { if af(item) == match { filteredPaths = append(filteredPaths, item) } } return filteredPaths }<|fim▁end|>
<|file_name|>datasetsCtrl.js<|end_file_name|><|fim▁begin|>'use strict'; angular.module('hopsWorksApp') .controller('DatasetsCtrl', ['$scope', '$q', '$mdSidenav', '$mdUtil', '$log', 'DataSetService', 'JupyterService', '$routeParams', '$route', 'ModalService', 'growl', '$location', 'MetadataHelperService', '$rootScope', 'DelaProjectService', 'DelaClusterProjectService', function ($scope, $q, $mdSidenav, $mdUtil, $log, DataSetService, JupyterService, $routeParams, $route, ModalService, growl, $location, MetadataHelperService, $rootScope, DelaProjectService, DelaClusterProjectService) { var self = this; self.itemsPerPage = 14; self.working = false; //Some variables to keep track of state. self.files = []; //A list of files currently displayed to the user. self.projectId = $routeParams.projectID; //The id of the project we're currently working in. self.pathArray; //An array containing all the path components of the current path. If empty: project root directory. self.sharedPathArray; //An array containing all the path components of a path in a shared dataset self.highlighted; self.parentDS = $rootScope.parentDS; // Details of the currently selecte file/dir self.selected = null; //The index of the selected file in the files array. self.sharedPath = null; //The details about the currently selected file. self.routeParamArray = []; $scope.readme = null; var dataSetService = DataSetService(self.projectId); //The datasetservice for the current project. var delaHopsService = DelaProjectService(self.projectId); var delaClusterService = DelaClusterProjectService(self.projectId); $scope.all_selected = false; self.selectedFiles = {}; //Selected files self.dir_timing; self.isPublic = undefined; self.shared = undefined; self.status = undefined; self.tgState = true; self.onSuccess = function (e) { growl.success("Copied to clipboard", {title: '', ttl: 1000}); e.clearSelection(); }; self.metadataView = {}; self.availableTemplates = []; self.closeSlider = false; self.breadcrumbLen = function () { if (self.pathArray === undefined || self.pathArray === null) { return 0; } var displayPathLen = 10; if (self.pathArray.length <= displayPathLen) { return self.pathArray.length - 1; } return displayPathLen; }; self.cutBreadcrumbLen = function () { if (self.pathArray === undefined || self.pathArray === null) { return false; } if (self.pathArray.length - self.breadcrumbLen() > 0) { return true; } return false; }; $scope.sort = function (keyname) { $scope.sortKey = keyname; //set the sortKey to the param passed $scope.reverse = !$scope.reverse; //if true make it false and vice versa }; /** * watch for changes happening in service variables from the other controller */ $scope.$watchCollection(MetadataHelperService.getAvailableTemplates, function (availTemplates) { if (!angular.isUndefined(availTemplates)) { self.availableTemplates = availTemplates; } }); $scope.$watch(MetadataHelperService.getDirContents, function (response) { if (response === "true") { getDirContents(); MetadataHelperService.setDirContents("false"); } }); self.isSharedDs = function (name) { var top = name.split("::"); if (top.length === 1) { return false; } return true; }; self.isShared = function () { var top = self.pathArray[0].split("::"); if (top.length === 1) { return false; } return true; }; self.sharedDatasetPath = function () { var top = self.pathArray[0].split("::"); if (top.length === 1) { self.sharedPathArray = []; return; } // /proj::shared_ds/path/to -> /proj/ds/path/to // so, we add '1' to the pathLen self.sharedPathArray = new 
Array(self.pathArray.length + 1); self.sharedPathArray[0] = top[0]; self.sharedPathArray[1] = top[1]; for (var i = 1; i < self.pathArray.length; i++) { self.sharedPathArray[i + 1] = self.pathArray[i]; } return self.sharedPathArray; }; /* * Get all datasets under the current project. * @returns {undefined} */ self.getAllDatasets = function () { //Get the path for an empty patharray: will get the datasets var path = getPath([]); dataSetService.getContents(path).then( function (success) { self.files = success.data; self.pathArray = []; console.log(success); }, function (error) { console.log("Error getting all datasets in project " + self.projectId); console.log(error); }); }; /** * Get the contents of the directory at the path with the given path components and load it into the frontend. * @param {type} The array of path compontents to fetch. If empty, fetches the current path. * @returns {undefined} */ var getDirContents = function (pathComponents) { //Construct the new path array var newPathArray; if (pathComponents) { newPathArray = pathComponents; } else if (self.routeParamArray) { newPathArray = self.pathArray.concat(self.routeParamArray); } else { newPathArray = self.pathArray; } //Convert into a path var newPath = getPath(newPathArray); self.files = []; self.working = true; self.dir_timing = new Date().getTime(); //Get the contents and load them dataSetService.getContents(newPath).then( function (success) { //Clear any selections self.all_selected = false; self.selectedFiles = {}; //Reset the selected file self.selected = null; self.working = false; //Set the current files and path self.files = success.data; self.pathArray = newPathArray; console.log(success); // alert('Execution time: ' + (new Date().getTime() - self.dir_timing)); console.log('Execution time: ' + (new Date().getTime() - self.dir_timing)); if ($rootScope.selectedFile) { var filePathArray = self.pathArray.concat($rootScope.selectedFile); self.getFile(filePathArray); $rootScope.selectedFile = undefined; } }, function (error) { if (error.data.errorMsg.indexOf("Path is not a directory.") > -1) { var popped = newPathArray.pop(); console.log(popped); self.openDir({name: popped, dir: false, underConstruction: false}); self.pathArray = newPathArray; self.routeParamArray = []; //growl.info(error.data.errorMsg, {title: 'Info', ttl: 2000}); getDirContents(); } else if (error.data.errorMsg.indexOf("Path not found :") > -1) { self.routeParamArray = []; //$route.updateParams({fileName:''}); growl.error(error.data.errorMsg, {title: 'Error', ttl: 5000, referenceId: 4}); getDirContents(); } self.working = false; console.log("Error getting the contents of the path " + getPath(newPathArray)); console.log(error); }); }; self.getFile = function (pathComponents) { var newPathArray; newPathArray = pathComponents; //Convert into a path var newPath = getPath(newPathArray); dataSetService.getFile(newPath).then( function (success) { self.highlighted = success.data; self.select(self.highlighted.name, self.highlighted, undefined); $scope.search = self.highlighted.name; }, function (error) { growl.error(error.data.errorMsg, {title: 'Error', ttl: 5000, referenceId: 4}); }); }; var init = function () { //Check if the current dataset is set if ($routeParams.datasetName) { //Dataset is set: get the contents self.pathArray = [$routeParams.datasetName]; } else { //No current dataset is set: get all datasets. 
self.pathArray = []; } if ($routeParams.datasetName && $routeParams.fileName) { //file name is set: get the contents var paths = $routeParams.fileName.split("/"); paths.forEach(function (entry) { if (entry !== "") { self.routeParamArray.push(entry); } }); } getDirContents(); self.tgState = true; }; init(); /** * Upload a file to the specified path. * @param {type} path * @returns {undefined} */ var upload = function (path) { dataSetService.upload(path).then( function (success) { console.log("upload success"); console.log(success); getDirContents(); }, function (error) { console.log("upload error"); console.log(error); }); }; /** * Remove the inode at the given path. If called on a folder, will * remove the folder and all its contents recursively. * @param {type} path. The project-relative path to the inode to be removed. * @returns {undefined} */ var removeInode = function (path) { dataSetService.removeDataSetDir(path).then( function (success) { growl.success(success.data.successMessage, {title: 'Success', ttl: 1000}); getDirContents(); }, function (error) { growl.error(error.data.errorMsg, {title: 'Error', ttl: 5000}); }); }; /** * Open a modal dialog for folder creation. The folder is created at the current path. * @returns {undefined} */ self.newDataSetModal = function () { ModalService.newFolder('md', getPath(self.pathArray)).then( function (success) { growl.success(success.data.successMessage, {title: 'Success', ttl: 1000}); getDirContents(); }, function (error) { //The user changed his/her mind. Don't really need to do anything. // getDirContents(); }); }; /** * Delete the file with the given name under the current path. * If called on a folder, will remove the folder * and all its contents recursively. * @param {type} fileName * @returns {undefined} */ self.deleteFile = function (fileName) { var removePathArray = self.pathArray.slice(0); removePathArray.push(fileName); removeInode('file/' + getPath(removePathArray)); }; /** * Delete the dataset with the given name under the current path. * @param {type} fileName * @returns {undefined} */ self.deleteDataset = function (fileName) { var removePathArray = self.pathArray.slice(0); removePathArray.push(fileName); removeInode(getPath(removePathArray)); }; // self.deleteSelected = function () { // var removePathArray = self.pathArray.slice(0); // for(var fileName in self.selectedFiles){ // removePathArray.push(fileName); // removeInode(getPath(removePathArray)); // } // }; /** * Makes the dataset public for anybody within the local cluster or any outside cluster. * @param id inodeId */ self.sharingDataset = {}; self.shareWithHops = function (id) { ModalService.confirm('sm', 'Confirm', 'Are you sure you want to make this DataSet public? \n\ This will make all its files available for any registered user to download and process.').then( function (success) { self.sharingDataset[id] = true; delaHopsService.shareWithHopsByInodeId(id).then( function (success) { self.sharingDataset[id] = false; growl.success(success.data.successMessage, {title: 'The DataSet is now Public(Hops Site).', ttl: 1500}); getDirContents(); }, function (error) { self.sharingDataset[id] = false; growl.error(error.data.errorMsg, {title: 'Error', ttl: 5000}); }); } ); }; /** * Makes the dataset public for anybody within the local cluster * @param id inodeId */ self.shareWithCluster = function (id) { ModalService.confirm('sm', 'Confirm', 'Are you sure you want to make this DataSet public? 
\n\ This will make all its files available for any cluster user to share and process.').then( function (success) { self.sharingDataset[id] = true; delaClusterService.shareWithClusterByInodeId(id).then( function (success) { self.sharingDataset[id] = false; growl.success(success.data.successMessage, {title: 'The DataSet is now Public(Cluster).', ttl: 1500}); getDirContents(); }, function (error) { self.sharingDataset[id] = false; growl.error(error.data.errorMsg, {title: 'Error', ttl: 5000}); }); } ); }; self.showManifest = function(publicDSId){ delaHopsService.getManifest(publicDSId).then(function(success){ var manifest = success.data; ModalService.json('md','Manifest', manifest).then(function(){ }); }); }; self.unshareFromHops = function (publicDSId) { ModalService.confirm('sm', 'Confirm', 'Are you sure you want to make this DataSet private? ').then( function (success) { delaHopsService.unshareFromHops(publicDSId, false).then( function (success) { growl.success(success.data.successMessage, {title: 'The DataSet is not Public(internet) anymore.', ttl: 1500}); getDirContents(); }, function (error) { growl.error(error.data.errorMsg, {title: 'Error', ttl: 5000, referenceId: 4}); }); } ); }; self.unshareFromCluster = function (inodeId) { ModalService.confirm('sm', 'Confirm', 'Are you sure you want to make this DataSet private? ').then( function (success) { delaClusterService.unshareFromCluster(inodeId).then( function (success) { growl.success(success.data.successMessage, {title: 'The DataSet is not Public(cluster) anymore.', ttl: 1500}); getDirContents(); }, function (error) { growl.error(error.data.errorMsg, {title: 'Error', ttl: 5000, referenceId: 4}); }); } ); }; self.parentPathArray = function () { var newPathArray = self.pathArray.slice(0); var clippedPath = newPathArray.splice(1, newPathArray.length - 1); return clippedPath; }; self.unzip = function (filename) { var pathArray = self.pathArray.slice(0); // pathArray.push(self.selected); pathArray.push(filename); var filePath = getPath(pathArray); growl.info("Started unzipping...", {title: 'Unzipping Started', ttl: 2000, referenceId: 4}); dataSetService.unzip(filePath).then( function (success) { growl.success("Refresh your browser when finished", {title: 'Unzipping in Background', ttl: 5000, referenceId: 4}); }, function (error) { growl.error(error.data.errorMsg, {title: 'Error unzipping file', ttl: 5000, referenceId: 4}); }); }; self.isZippedfile = function () { // https://stackoverflow.com/questions/680929/how-to-extract-extension-from-filename-string-in-javascript var re = /(?:\.([^.]+))?$/; var ext = re.exec(self.selected)[1]; switch (ext) { case "zip": return true; case "rar": return true; case "tar": return true; case "tgz": return true; case "gz": return true; case "bz2": return true; case "7z": return true; } return false; }; self.convertIPythonNotebook = function (filename) { var pathArray = self.pathArray.slice(0); pathArray.push(filename); //self.selected var filePath = getPath(pathArray); growl.info("Converting...", {title: 'Conversion Started', ttl: 2000, referenceId: 4}); JupyterService.convertIPythonNotebook(self.projectId, filePath).then( function (success) { growl.success("Finished - refresh your browser", {title: 'Converting in Background', ttl: 3000, referenceId: 4}); getDirContents(); }, function (error) { growl.error(error.data.errorMsg, {title: 'Error converting notebook', ttl: 5000, referenceId: 4}); }); }; self.isIPythonNotebook = function () { if (self.selected === null || self.selected === undefined) { return false; 
} if (self.selected.indexOf('.') == -1) { return false; } var ext = self.selected.split('.').pop(); if (ext === null || ext === undefined) { return false; } switch (ext) { case "ipynb": return true; } return false; }; /** * Preview the requested file in a Modal. If the file is README.md * and the preview flag is false, preview the file in datasets. * @param {type} dataset * @param {type} preview * @returns {undefined} */ self.filePreview = function (dataset, preview, readme) { var fileName = ""; //handle README.md filename for datasets browser viewing here if (readme && !preview) { if (dataset.shared === true) { fileName = dataset.selectedIndex + "/README.md"; } else { fileName = dataset.path.substring(dataset.path.lastIndexOf('/')).replace('/', '') + "/README.md"; } } else { fileName = dataset; } var previewPathArray = self.pathArray.slice(0); previewPathArray.push(fileName); var filePath = getPath(previewPathArray); //If filename is README.md then try fetching it without the modal if (readme && !preview) { dataSetService.filePreview(filePath, "head").then( function (success) { var fileDetails = JSON.parse(success.data.data); var content = fileDetails.filePreviewDTO[0].content; var conv = new showdown.Converter({parseImgDimensions: true}); $scope.readme = conv.makeHtml(content); }, function (error) { //To hide README from UI growl.error(error.data.errorMsg, {title: 'Error retrieving README file', ttl: 5000, referenceId: 4}); $scope.readme = null; }); } else { ModalService.filePreview('lg', fileName, filePath, self.projectId, "head").then( function (success) { }, function (error) { }); } }; self.copy = function (inodeId, name) { ModalService.selectDir('lg', "/[^]*/", "problem selecting folder").then(function (success) { var destPath = success; // Get the relative path of this DataSet, relative to the project home directory // replace only first occurrence var relPath = destPath.replace("/Projects/" + self.projectId + "/", ""); var finalPath = relPath + "/" + name; dataSetService.copy(inodeId, finalPath).then( function (success) { getDirContents(); growl.success('', {title: 'Copied ' + name + ' successfully', ttl: 5000, referenceId: 4}); }, function (error) { growl.error(error.data.errorMsg, {title: name + ' was not copied', ttl: 5000, referenceId: 4}); }); }, function (error) { }); }; self.copySelected = function () { //Check if we are to move one file or many if (Object.keys(self.selectedFiles).length === 0 && self.selectedFiles.constructor === Object) { if (self.selected !== null && self.selected !== undefined) { self.copy(self.selected.id, self.selected.name); } } else if (Object.keys(self.selectedFiles).length !== 0 && self.selectedFiles.constructor === Object) { ModalService.selectDir('lg', "/[^]*/", "problem selecting folder").then( function (success) { var destPath = success; // Get the relative path of this DataSet, relative to the project home directory // replace only first occurrence var relPath = destPath.replace("/Projects/" + self.projectId + "/", ""); //var finalPath = relPath + "/" + name; var names = []; var i = 0; //Check if have have multiple files for (var name in self.selectedFiles) { names[i] = name; i++; } var errorMsg = ''; for (var name in self.selectedFiles) { dataSetService.copy(self.selectedFiles[name].id, relPath + "/" + name).then( function (success) { //If we copied the last file if (name === names[names.length - 1]) { getDirContents(); for (var i = 0; i < names.length; i++) { delete self.selectedFiles[names[i]]; } self.all_selected = false; } 
//growl.success('',{title: 'Copied successfully', ttl: 5000, referenceId: 4}); }, function (error) { growl.error(error.data.errorMsg, {title: name + ' was not copied', ttl: 5000}); errorMsg = error.data.errorMsg; }); if (errorMsg === 'Can not copy/move to a public dataset.') { break; } } }, function (error) { //The user changed their mind. }); } }; self.move = function (inodeId, name) { ModalService.selectDir('lg', "/[^]*/", "problem selecting folder").then( function (success) { var destPath = success; // Get the relative path of this DataSet, relative to the project home directory // replace only first occurrence var relPath = destPath.replace("/Projects/" + self.projectId + "/", ""); var finalPath = relPath + "/" + name; dataSetService.move(inodeId, finalPath).then( function (success) { getDirContents(); growl.success(success.data.successMessage, {title: 'Moved successfully. Opened dest dir: ' + relPath, ttl: 2000}); }, function (error) { growl.error(error.data.errorMsg, {title: name + ' was not moved', ttl: 5000}); }); }, function (error) { }); }; self.isSelectedFiles = function () { return Object.keys(self.selectedFiles).length; }; self.moveSelected = function () { //Check if we are to move one file or many if (Object.keys(self.selectedFiles).length === 0 && self.selectedFiles.constructor === Object) { if (self.selected !== null && self.selected !== undefined) { self.move(self.selected.id, self.selected.name); } } else if (Object.keys(self.selectedFiles).length !== 0 && self.selectedFiles.constructor === Object) { ModalService.selectDir('lg', "/[^]*/", "problem selecting folder").then( function (success) { var destPath = success; // Get the relative path of this DataSet, relative to the project home directory // replace only first occurrence var relPath = destPath.replace("/Projects/" + self.projectId + "/", ""); //var finalPath = relPath + "/" + name; var names = []; var i = 0; //Check if have have multiple files for (var name in self.selectedFiles) { names[i] = name; i++; } var errorMsg = ''; for (var name in self.selectedFiles) { dataSetService.move(self.selectedFiles[name].id, relPath + "/" + name).then( function (success) { //If we moved the last file if (name === names[names.length - 1]) { getDirContents(); for (var i = 0; i < names.length; i++) { delete self.selectedFiles[names[i]]; } self.all_selected = false; } }, function (error) { growl.error(error.data.errorMsg, {title: name + ' was not moved', ttl: 5000}); errorMsg = error.data.errorMsg; }); if (errorMsg === 'Can not copy/move to a public dataset.') { break; } } }, function (error) { //The user changed their mind. 
}); } }; var renameModal = function (inodeId, name) { var pathComponents = self.pathArray.slice(0); var newPath = getPath(pathComponents); var destPath = newPath + '/'; ModalService.enterName('sm', "Rename File or Directory", name).then( function (success) { var fullPath = destPath + success.newName; dataSetService.move(inodeId, fullPath).then( function (success) { getDirContents(); self.all_selected = false; self.selectedFiles = {}; self.selected = null; }, function (error) { growl.error(error.data.errorMsg, {title: 'Error', ttl: 5000, referenceId: 4}); self.all_selected = false; self.selectedFiles = {}; self.selected = null; }); }); }; self.rename = function (inodeId, name) { renameModal(inodeId, name); }; self.renameSelected = function () { if (self.isSelectedFiles() === 1) { var inodeId, inodeName; for (var name in self.selectedFiles) { inodeName = name; } inodeId = self.selectedFiles[inodeName]['id']; renameModal(inodeId, inodeName); } }; /** * Opens a modal dialog for file upload. * @returns {undefined} */ self.uploadFile = function () { var templateId = -1; ModalService.upload('lg', self.projectId, getPath(self.pathArray), templateId).then( function (success) { growl.success(success, {ttl: 5000}); getDirContents(); }, function (error) { // growl.info("Closed without saving.", {title: 'Info', ttl: 5000}); getDirContents(); }); }; /** * Sends a request to erasure code a file represented by the given path. * It checks * .. if the given path resolves to a file or a dir * .. if the given path is an existing file * .. if the given file is large enough (comprises more than 10 blocks) * * If all of the above are met, the compression takes place in an asynchronous operation * and the user gets notified when it finishes via a message * * @param {type} file * @returns {undefined} */ self.compress = function (file) { var pathArray = self.pathArray.slice(0); pathArray.push(file.name); var filePath = getPath(pathArray); //check if the path is a dir dataSetService.isDir(filePath).then( function (success) { var object = success.data.successMessage; switch (object) { case "DIR": ModalService.alert('sm', 'Alert', 'You can only compress files'); break; case "FILE": //if the path is a file go on dataSetService.checkFileExist(filePath).then( function (successs) { //check the number of blocks in the file dataSetService.checkFileBlocks(filePath).then( function (successss) { var noOfBlocks = parseInt(successss.data); console.log("NO OF BLOCKS " + noOfBlocks); if (noOfBlocks >= 10) { ModalService.alert('sm', 'Confirm', 'This operation is going to run in the background').then( function (modalSuccess) { console.log("FILE PATH IS " + filePath); dataSetService.compressFile(filePath); }); } else { growl.error("The requested file is too small to be compressed", {title: 'Error', ttl: 5000, referenceId: 4}); } }, function (error) { growl.error(error.data.errorMsg, {title: 'Error', ttl: 5000, referenceId: 4}); }); }); } }, function (error) { growl.error(error.data.errorMsg, {title: 'Error', ttl: 5000, referenceId: 4}); }); }; /** * Opens a modal dialog for sharing. 
* @returns {undefined} */ self.share = function (name) { ModalService.shareDataset('md', name).then( function (success) { growl.success(success.data.successMessage, {title: 'Success', ttl: 5000}); getDirContents(); }, function (error) { }); }; /** * Opens a modal dialog to make dataset editable * @param {type} name * @param {type} permissions * @returns {undefined} */ self.permissions = function (name, permissions) { ModalService.permissions('md', name, permissions).then( function (success) { growl.success(success.data.successMessage, {title: 'Success', ttl: 5000}); getDirContents(); }, function (error) { }); }; /** * Opens a modal dialog for unsharing. * @param {type} name * @returns {undefined} */ self.unshare = function (name) { ModalService.unshareDataset('md', name).then( function (success) { growl.success(success.data.successMessage, {title: 'Success', ttl: 5000}); getDirContents(); }, function (error) { }); }; /** * Upon click on a inode in the browser: * + If folder: open folder, fetch contents from server and display. * + If file: open a confirm dialog prompting for download. * @param {type} file * @returns {undefined} */ self.openDir = function (file) { if (file.dir) { var newPathArray = self.pathArray.slice(0); newPathArray.push(file.name); getDirContents(newPathArray); } else if (!file.underConstruction) { ModalService.confirm('sm', 'Confirm', 'Do you want to download this file?').then( function (success) { var downloadPathArray = self.pathArray.slice(0); downloadPathArray.push(file.name); var filePath = getPath(downloadPathArray); //growl.success("Asdfasdf", {title: 'asdfasd', ttl: 5000}); dataSetService.checkFileForDownload(filePath).then( function (success) { dataSetService.fileDownload(filePath); }, function (error) { growl.error(error.data.errorMsg, {title: 'Error', ttl: 5000}); }); } ); } else { growl.info("File under construction.", {title: 'Info', ttl: 5000}); } }; /** * Go up to parent directory. * @returns {undefined} */ self.back = function () { var newPathArray = self.pathArray.slice(0); newPathArray.pop(); if (newPathArray.length === 0) { $location.path('/project/' + self.projectId + '/datasets'); } else { getDirContents(newPathArray); } }; self.goToDataSetsDir = function () { $location.path('/project/' + self.projectId + '/datasets'); }; /** * Go to the folder at the index in the pathArray array. * @param {type} index * @returns {undefined} */ self.goToFolder = function (index) { var newPathArray = self.pathArray.slice(0); newPathArray.splice(index, newPathArray.length - index); getDirContents(newPathArray); }; self.menustyle = { "opacity": 0.2 }; /** * Select an inode; updates details panel. * @param {type} selectedIndex * @param {type} file * @param {type} event * @returns {undefined} */ self.select = function (selectedIndex, file, event) { // 1. Turn off the selected file at the top of the browser. // Add existing selected file (idempotent, if already added) // If file already selected, deselect it. 
if (event && event.ctrlKey) { } else { self.selectedFiles = {}; } if (self.isSelectedFiles() > 0) { self.selected = null; } else { self.selected = file.name; } self.selectedFiles[file.name] = file; self.selectedFiles[file.name].selectedIndex = selectedIndex; self.menustyle.opacity = 1.0; console.log(self.selectedFiles); }; self.haveSelected = function (file) { if (file === undefined || file === null || file.name === undefined || file.name === null) { return false; } if (file.name in self.selectedFiles) { return true; } return false; }; self.selectAll = function () { var i = 0; var min = Math.min(self.itemsPerPage, self.files.length); for (i = 0; i < min; i++) { var f = self.files[i]; self.selectedFiles[f.name] = f; self.selectedFiles[f.name].selectedIndex = i; } self.menustyle.opacity = 1; self.selected = null; self.all_selected = true; if (Object.keys(self.selectedFiles).length === 1 && self.selectedFiles.constructor === Object) { self.selected = Object.keys(self.selectedFiles)[0]; } }; //TODO: Move files to hdfs trash folder self.trashSelected = function () { }; self.deleteSelected = function () { var i = 0; var names = []; for (var name in self.selectedFiles) { names[i] = name; self.deleteFile(name); } for (var i = 0; i < names.length; i++) { delete self.selectedFiles[names[i]]; } self.all_selected = false; self.selectedFiles = {}; self.selected = null; }; self.deselect = function (selectedIndex, file, event) { var i = 0; if (Object.keys(self.selectedFiles).length === 1 && self.selectedFiles.constructor === Object) { for (var name in self.selectedFiles) { if (file.name === name) { delete self.selectedFiles[name]; //break; } } } else { if (event.ctrlKey) { for (var name in self.selectedFiles) { if (file.name === name) { delete self.selectedFiles[name]; break; } } } else { for (var name in self.selectedFiles) { if (file.name !== name) { delete self.selectedFiles[name]; //break; } } } } if (Object.keys(self.selectedFiles).length === 0 && self.selectedFiles.constructor === Object) {<|fim▁hole|> self.selected = null; } else if (Object.keys(self.selectedFiles).length === 1 && self.selectedFiles.constructor === Object) { self.menustyle.opacity = 1.0; self.selected = Object.keys(self.selectedFiles)[0]; } self.all_selected = false; }; self.deselectAll = function () { self.selectedFiles = {}; self.selected = null; self.sharedPath = null; self.menustyle.opacity = 0.2; }; self.toggleLeft = buildToggler('left'); self.toggleRight = buildToggler('right'); function buildToggler(navID) { var debounceFn = $mdUtil.debounce(function () { $mdSidenav(navID).toggle() .then(function () { MetadataHelperService.fetchAvailableTemplates() .then(function (response) { self.availableTemplates = JSON.parse(response.board).templates; }); }); }, 300); return debounceFn; } ; self.getSelectedPath = function (selectedFile) { if (self.isSelectedFiles() !== 1) { return ""; } return "hdfs://" + selectedFile.path; }; }]); /** * Turn the array <i>pathArray</i> containing, path components, into a path string. * @param {type} pathArray * @returns {String} */ var getPath = function (pathArray) { return pathArray.join("/"); };<|fim▁end|>
self.menustyle.opacity = 0.2;
<|file_name|>SqlStepper.amd.js<|end_file_name|><|fim▁begin|>;modjewel.define("weinre/target/SqlStepper", function(require, exports, module) { // Generated by CoffeeScript 1.3.3 var Binding, SqlStepper, executeSql, ourErrorCallback, runStep; Binding = require('../common/Binding'); module.exports = SqlStepper = (function() { function SqlStepper(steps) { var context; if (!(this instanceof SqlStepper)) { return new SqlStepper(steps); } this.__context = {}; context = this.__context; context.steps = steps; } SqlStepper.prototype.run = function(db, errorCallback) { var context; context = this.__context; if (context.hasBeenRun) { throw new Ex(arguments, "stepper has already been run"); } context.hasBeenRun = true; context.db = db; context.errorCallback = errorCallback; context.nextStep = 0; context.ourErrorCallback = new Binding(this, ourErrorCallback); context.runStep = new Binding(this, runStep); this.executeSql = new Binding(this, executeSql); return db.transaction(context.runStep); }; SqlStepper.example = function(db, id) { var errorCb, step1, step2, stepper; step1 = function() { return this.executeSql("SELECT name FROM sqlite_master WHERE type='table'"); }; step2 = function(resultSet) { var i, name, result, rows; rows = resultSet.rows; result = []; i = 0; while (i < rows.length) { name = rows.item(i).name; if (name === "__WebKitDatabaseInfoTable__") { i++; continue; } result.push(name); i++; } return console.log(("[" + this.id + "] table names: ") + result.join(", ")); }; errorCb = function(sqlError) { return console.log(("[" + this.id + "] sql error:" + sqlError.code + ": ") + sqlError.message); }; stepper = new SqlStepper([step1, step2]); stepper.id = id; return stepper.run(db, errorCb); }; return SqlStepper; })(); executeSql = function(statement, data) { var context; context = this.__context; return context.tx.executeSql(statement, data, context.runStep, context.ourErrorCallback); }; ourErrorCallback = function(tx, sqlError) { var context; context = this.__context; return context.errorCallback.call(this, sqlError); }; runStep = function(tx, resultSet) { var context, step; context = this.__context; if (context.nextStep >= context.steps.length) { return;<|fim▁hole|> step = context.steps[context.currentStep]; return step.call(this, resultSet); }; require("../common/MethodNamer").setNamesForClass(module.exports); });<|fim▁end|>
} context.tx = tx; context.currentStep = context.nextStep; context.nextStep++;
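# SqlStepper above chains asynchronous Web SQL statements so that each step
# receives the previous step's result and a single error callback catches any
# failure. The snippet below is a rough synchronous Python analogue of that
# control flow; run_steps and the step functions are illustrative names, not
# part of the original API.

def run_steps(steps, on_error, initial=None):
    # Feed each step the result of the previous one; stop and report on the
    # first failure, as ourErrorCallback does in the stepper above.
    result = initial
    for step in steps:
        try:
            result = step(result)
        except Exception as exc:
            on_error(exc)
            return None
    return result

step1 = lambda _: ["users", "orders", "__WebKitDatabaseInfoTable__"]
step2 = lambda tables: [t for t in tables if t != "__WebKitDatabaseInfoTable__"]

print(run_steps([step1, step2], on_error=print))   # ['users', 'orders']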
<|file_name|>job_queue.rs<|end_file_name|><|fim▁begin|>use std::collections::HashSet; use std::collections::hash_map::HashMap; use std::collections::hash_map::Entry::{Occupied, Vacant}; use std::sync::TaskPool; use std::sync::mpsc::{channel, Sender, Receiver}; use term::color::YELLOW; use core::{Package, PackageId, Resolve, PackageSet}; use util::{Config, DependencyQueue, Fresh, Dirty, Freshness}; use util::{CargoResult, Dependency, profile}; use super::job::Job; /// A management structure of the entire dependency graph to compile. /// /// This structure is backed by the `DependencyQueue` type and manages the /// actual compilation step of each package. Packages enqueue units of work and /// then later on the entire graph is processed and compiled. pub struct JobQueue<'a, 'b> { pool: TaskPool, queue: DependencyQueue<(&'a PackageId, Stage), (&'a Package, Vec<(Job, Freshness)>)>, tx: Sender<Message>, rx: Receiver<Message>, resolve: &'a Resolve, packages: &'a PackageSet, active: u32, pending: HashMap<(&'a PackageId, Stage), PendingBuild>, state: HashMap<&'a PackageId, Freshness>, ignored: HashSet<&'a PackageId>, printed: HashSet<&'a PackageId>, } /// A helper structure for metadata about the state of a building package. struct PendingBuild { /// Number of jobs currently active amt: u32, /// Current freshness state of this package. Any dirty target within a /// package will cause the entire package to become dirty. fresh: Freshness, } /// Current stage of compilation for an individual package. /// /// This is the second layer of keys on the dependency queue to track the state /// of where a particular package is in the compilation pipeline. Each of these /// stages has a network of dependencies among them, outlined by the /// `Dependency` implementation found below. /// /// Each build step for a package is registered with one of these stages, and /// each stage has a vector of work to perform in parallel. #[derive(Hash, PartialEq, Eq, Clone, PartialOrd, Ord, Show, Copy)] pub enum Stage { Start, BuildCustomBuild, RunCustomBuild, Libraries, Binaries, LibraryTests, BinaryTests, } type Message = (PackageId, Stage, Freshness, CargoResult<()>); impl<'a, 'b> JobQueue<'a, 'b> { pub fn new(resolve: &'a Resolve, packages: &'a PackageSet, config: &Config) -> JobQueue<'a, 'b> { let (tx, rx) = channel(); JobQueue { pool: TaskPool::new(config.jobs() as usize), queue: DependencyQueue::new(), tx: tx, rx: rx, resolve: resolve, packages: packages, active: 0, pending: HashMap::new(), state: HashMap::new(), ignored: HashSet::new(), printed: HashSet::new(), } } pub fn enqueue(&mut self, pkg: &'a Package, stage: Stage, jobs: Vec<(Job, Freshness)>) { // Record the freshness state of this package as dirty if any job is // dirty or fresh otherwise let fresh = jobs.iter().fold(Fresh, |f1, &(_, f2)| f1.combine(f2)); match self.state.entry(pkg.get_package_id()) { Occupied(mut entry) => { *entry.get_mut() = entry.get().combine(fresh); } Vacant(entry) => { entry.insert(fresh); } }; // Add the package to the dependency graph self.queue.enqueue(&(self.resolve, self.packages), Fresh, (pkg.get_package_id(), stage), (pkg, jobs)); } pub fn ignore(&mut self, pkg: &'a Package) { self.ignored.insert(pkg.get_package_id()); } /// Execute all jobs necessary to build the dependency graph. /// /// This function will spawn off `config.jobs()` workers to build all of the /// necessary dependencies, in order. Freshness is propagated as far as /// possible along each dependency chain. 
pub fn execute(&mut self, config: &Config) -> CargoResult<()> { let _p = profile::start("executing the job graph"); // Iteratively execute the dependency graph. Each turn of this loop will // schedule as much work as possible and then wait for one job to finish, // possibly scheduling more work afterwards. while self.queue.len() > 0 { loop { match self.queue.dequeue() { Some((fresh, (_, stage), (pkg, jobs))) => { info!("start: {} {:?}", pkg, stage); try!(self.run(pkg, stage, fresh, jobs, config)); } None => break, } } // Now that all possible work has been scheduled, wait for a piece // of work to finish. If any package fails to build then we stop // scheduling work as quickly as possibly. let (id, stage, fresh, result) = self.rx.recv().unwrap(); info!(" end: {} {:?}", id, stage); let id = *self.state.keys().find(|&k| *k == &id).unwrap(); self.active -= 1; match result { Ok(()) => { let state = &mut self.pending[(id, stage)]; state.amt -= 1; state.fresh = state.fresh.combine(fresh); if state.amt == 0 { self.queue.finish(&(id, stage), state.fresh); } } Err(e) => { if self.active > 0 { try!(config.shell().say( "Build failed, waiting for other \ jobs to finish...", YELLOW)); for _ in self.rx.iter().take(self.active as usize) {} } return Err(e) } } } log!(5, "rustc jobs completed"); Ok(()) } /// Execute a stage of compilation for a package.<|fim▁hole|> /// /// The input freshness is from `dequeue()` and indicates the combined /// freshness of all upstream dependencies. This function will schedule all /// work in `jobs` to be executed. fn run(&mut self, pkg: &'a Package, stage: Stage, fresh: Freshness, jobs: Vec<(Job, Freshness)>, config: &Config) -> CargoResult<()> { let njobs = jobs.len(); let amt = if njobs == 0 {1} else {njobs as u32}; let id = pkg.get_package_id().clone(); // While the jobs are all running, we maintain some metadata about how // many are running, the current state of freshness (of all the combined // jobs), and the stage to pass to finish() later on. self.active += amt; self.pending.insert((pkg.get_package_id(), stage), PendingBuild { amt: amt, fresh: fresh, }); let mut total_fresh = fresh.combine(self.state[pkg.get_package_id()]); let mut running = Vec::new(); for (job, job_freshness) in jobs.into_iter() { let fresh = job_freshness.combine(fresh); total_fresh = total_fresh.combine(fresh); let my_tx = self.tx.clone(); let id = id.clone(); let (desc_tx, desc_rx) = channel(); self.pool.execute(move|| { my_tx.send((id, stage, fresh, job.run(fresh, desc_tx))).unwrap(); }); // only the first message of each job is processed match desc_rx.recv() { Ok(msg) => running.push(msg), Err(..) => {} } } // If no work was scheduled, make sure that a message is actually send // on this channel. if njobs == 0 { self.tx.send((id, stage, fresh, Ok(()))).unwrap(); } // Print out some nice progress information // // This isn't super trivial becuase we don't want to print loads and // loads of information to the console, but we also want to produce a // faithful representation of what's happening. This is somewhat nuanced // as a package can start compiling *very* early on because of custom // build commands and such. // // In general, we try to print "Compiling" for the first nontrivial task // run for a package, regardless of when that is. We then don't print // out any more information for a package after we've printed it once. 
let print = !self.ignored.contains(&pkg.get_package_id()); let print = print && !self.printed.contains(&pkg.get_package_id()); if print && (stage == Stage::Libraries || (total_fresh == Dirty && running.len() > 0)) { self.printed.insert(pkg.get_package_id()); match total_fresh { Fresh => try!(config.shell().verbose(|c| { c.status("Fresh", pkg) })), Dirty => try!(config.shell().status("Compiling", pkg)) } } for msg in running.iter() { try!(config.shell().verbose(|c| c.status("Running", msg))); } Ok(()) } } impl<'a> Dependency for (&'a PackageId, Stage) { type Context = (&'a Resolve, &'a PackageSet); fn dependencies(&self, &(resolve, packages): &(&'a Resolve, &'a PackageSet)) -> Vec<(&'a PackageId, Stage)> { // This implementation of `Dependency` is the driver for the structure // of the dependency graph of packages to be built. The "key" here is // a pair of the package being built and the stage that it's at. // // Each stage here lists dependencies on the previous stages except for // the start state which depends on the ending state of all dependent // packages (as determined by the resolve context). let (id, stage) = *self; let pkg = packages.iter().find(|p| p.get_package_id() == id).unwrap(); let deps = resolve.deps(id).into_iter().flat_map(|a| a) .filter(|dep| *dep != id) .map(|dep| { (dep, pkg.get_dependencies().iter().find(|d| { d.get_name() == dep.get_name() }).unwrap()) }); match stage { Stage::Start => Vec::new(), // Building the build command itself starts off pretty easily,we // just need to depend on all of the library stages of our own build // dependencies (making them available to us). Stage::BuildCustomBuild => { let mut base = vec![(id, Stage::Start)]; base.extend(deps.filter(|&(_, dep)| dep.is_build()) .map(|(id, _)| (id, Stage::Libraries))); base } // When running a custom build command, we need to be sure that our // own custom build command is actually built, and then we need to // wait for all our dependencies to finish their custom build // commands themselves (as they may provide input to us). Stage::RunCustomBuild => { let mut base = vec![(id, Stage::BuildCustomBuild)]; base.extend(deps.filter(|&(_, dep)| dep.is_transitive()) .map(|(id, _)| (id, Stage::RunCustomBuild))); base } // Building a library depends on our own custom build command plus // all our transitive dependencies. Stage::Libraries => { let mut base = vec![(id, Stage::RunCustomBuild)]; base.extend(deps.filter(|&(_, dep)| dep.is_transitive()) .map(|(id, _)| (id, Stage::Libraries))); base } // Binaries only depend on libraries being available. Note that they // do not depend on dev-dependencies. Stage::Binaries => vec![(id, Stage::Libraries)], // Tests depend on all dependencies (including dev-dependencies) in // addition to the library stage for this package. Note, however, // that library tests only need to depend the custom build command // being run, not the libraries themselves. Stage::BinaryTests | Stage::LibraryTests => { let mut base = if stage == Stage::BinaryTests { vec![(id, Stage::Libraries)] } else { vec![(id, Stage::RunCustomBuild)] }; base.extend(deps.map(|(id, _)| (id, Stage::Libraries))); base } } } }<|fim▁end|>
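# The Cargo job queue above keys its dependency queue on (package, stage)
# pairs, and a stage only becomes runnable once every stage it depends on has
# finished. The sketch below replays that idea for a hypothetical crate `app`
# that depends on a crate `dep`, using a plain Kahn-style topological order;
# the stage names mirror the Stage enum above, everything else is illustrative.

from collections import defaultdict, deque

def topo_order(deps):
    # deps maps node -> list of nodes that must finish first.
    indegree = {node: len(reqs) for node, reqs in deps.items()}
    dependents = defaultdict(list)
    for node, reqs in deps.items():
        for req in reqs:
            dependents[req].append(node)
    ready = deque(node for node, count in indegree.items() if count == 0)
    order = []
    while ready:
        node = ready.popleft()
        order.append(node)
        for nxt in dependents[node]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                ready.append(nxt)
    return order

stages = {
    ("dep", "Start"): [],
    ("dep", "BuildCustomBuild"): [("dep", "Start")],
    ("dep", "RunCustomBuild"): [("dep", "BuildCustomBuild")],
    ("dep", "Libraries"): [("dep", "RunCustomBuild")],
    ("app", "Start"): [],
    ("app", "BuildCustomBuild"): [("app", "Start")],
    ("app", "RunCustomBuild"): [("app", "BuildCustomBuild"), ("dep", "RunCustomBuild")],
    ("app", "Libraries"): [("app", "RunCustomBuild"), ("dep", "Libraries")],
    ("app", "Binaries"): [("app", "Libraries")],
}

order = topo_order(stages)
assert order.index(("dep", "Libraries")) < order.index(("app", "Libraries"))
print(order)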
<|file_name|>neutralField1D.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python """ Tweak of profile plots as a function of nn. """ import pickle import matplotlib.pylab as plt import matplotlib.lines as mlines import numpy as np from subprocess import Popen import os, sys # If we add to sys.path, then it must be an absolute path commonDir = os.path.abspath("./../../../common") # Sys path is a list of system paths sys.path.append(commonDir) from CELMAPy.plotHelpers import PlotHelper, seqCMap3 scans = (\ "nn_9.9e+20" ,\ "nn_4e+19" ,\ "nn_1.5e+19" ,\ "nn_6.666666666666668e+18",\ "nn_2.5e+18" ,\ ) markers = ("*", "o", "s", "v", "^") ls = (\ (0, ()),\ (0, (5, 5)),\ (0, (3, 1, 1, 1)),\ (0, (3, 1, 1, 1, 1, 1)),\ (0, (1, 1)),\ ) fields = ("n", "phi", "jPar", "om", "ui", "sA", "ue") sD =\ {s:{f:\ {"ax":None,"line":None,"ls":l,"marker":m,"color":None}\ for f in fields}\ for s, m, l in zip(scans, markers, ls)} for direction in ("radial", "parallel"): for scan in scans: folder = "../../CSDXNeutralScanAr/visualizationPhysical/{}/field1D".format(scan) picklePath = os.path.join(folder,\ "mainFields-{}-1D-0.pickle".format(direction)) with open(picklePath, "rb") as f: fig = pickle.load(f) lnAx, phiAx, nAx, omDAx, jParAx, omAx, uiAx, nuiAx, ueAx, sAx =\ fig.get_axes() # Swap axes phiAx.set_position(omDAx.get_position()) sAx .set_position(nuiAx.get_position()) fig.delaxes(lnAx) fig.delaxes(omDAx) fig.delaxes(nuiAx) # Modify title position t = fig.texts[0] pos = list(t.get_position()) pos[1] = 0.75 t.set_position(pos) t.set_va("bottom") # Color adjust axes = (nAx, phiAx, jParAx, omAx, uiAx, sAx, ueAx) colors = seqCMap3(np.linspace(0, 1, len(axes))) # Recolor the lines for ax, color in zip(axes, colors): line = ax.get_lines()[0] line.set_color(color) # NOTE: Using the designated setter gives a comparion error line._markerfacecolor = color line._markeredgecolor = color # Set the colors sD[scan]["n"] ["color"] = colors[0] sD[scan]["phi"] ["color"] = colors[1] sD[scan]["jPar"]["color"] = colors[2] sD[scan]["om"] ["color"] = colors[3] sD[scan]["ui"] ["color"] = colors[4] sD[scan]["sA"] ["color"] = colors[5] sD[scan]["ue"] ["color"] = colors[6] # Set the lines sD[scan]["n"] ["line"] = nAx .get_lines()[0].get_data()[1] sD[scan]["phi"] ["line"] = phiAx .get_lines()[0].get_data()[1] sD[scan]["jPar"]["line"] = jParAx.get_lines()[0].get_data()[1] sD[scan]["om"] ["line"] = omAx .get_lines()[0].get_data()[1] sD[scan]["ui"] ["line"] = uiAx .get_lines()[0].get_data()[1] sD[scan]["sA"] ["line"] = sAx .get_lines()[0].get_data()[1] sD[scan]["ue"] ["line"] = ueAx .get_lines()[0].get_data()[1] xAxis = nAx.get_lines()[0].get_data()[0] # Clear the axes for ax in axes: ax.get_lines()[0].set_data((None,), (None,)) sD[scan]["n"] ["ax"] = nAx sD[scan]["phi"] ["ax"] = phiAx sD[scan]["jPar"]["ax"] = jParAx sD[scan]["om"] ["ax"] = omAx sD[scan]["ui"] ["ax"] = uiAx sD[scan]["sA"] ["ax"] = sAx sD[scan]["ue"] ["ax"] = ueAx # Plot for scan in scans: fig = sD[scans[0]]["n"]["ax"].figure for key in sD[scans[0]].keys(): sD[scans[0]][key]["ax"].plot(xAxis,\ sD[scan][key]["line"],\ color = sD[scan][key]["color"],\ marker = sD[scan][key]["marker"],\ ms = 5,\ markevery = 7,\ alpha = 0.7,\ ls = sD[scan][key]["ls"],\ ) sD[scans[0]][key]["ax"].autoscale(enable=True, axis="y", tight=True) # Put the legends on the y-axis handles, labels = sD[scans[0]][key]["ax"].get_legend_handles_labels() leg = sD[scans[0]][key]["ax"].legend() leg.remove() # Decrease fontsize by 1 to get some spacing sD[scans[0]][key]["ax"].set_ylabel(labels[0], fontsize=11) # 
Manually adjust the wspace as fig.subplots_adjust messes with the
    # spacing
    for key in ("phi", "om", "sA"):
        pos = sD[scans[0]][key]["ax"].get_position()
        pos = (pos.x0 + 0.05, pos.y0, pos.width, pos.height)
        sD[scans[0]][key]["ax"].set_position(pos)

    # Manually creating the legend
    handles = []

    # Used for conversion
    n0 = 1e19

    for scan in scans:
        curScan = n0/(n0+float(scan[3:]))*100
        label = r"$d = {:d} \;\%$".format(int(curScan))
        handle = mlines.Line2D([], [],\
                               color = "k" ,\
                               marker = sD[scan]["n"]["marker"],\
                               ls = sD[scan]["n"]["ls"] ,\
                               ms = 5 ,\
                               alpha = 0.7 ,\
                               label =label)

        handles.append(handle)

    # Put legends outside
    sD[scans[0]]["ue"]["ax"].legend(handles=handles,\
                                    ncol=2,\
                                    bbox_to_anchor=(1.15, 0.25),\
                                    loc="upper left",\
                                    borderaxespad=0.,\
                                    bbox_transform =\
                                    sD[scans[0]]["ue"]["ax"].transAxes,\
                                    )<|fim▁hole|>
<|file_name|>test_branches.py<|end_file_name|><|fim▁begin|>import pytest from gitlabform.gitlab import AccessLevel from tests.acceptance import ( run_gitlabform, DEFAULT_README, get_gitlab, ) gl = get_gitlab() @pytest.fixture(scope="function") def branches(request, gitlab, group_and_project): branches = [ "protect_branch_but_allow_all", "protect_branch_with_code_owner_approval_required", "protect_branch_and_disallow_all", "protect_branch_and_allow_merges", "protect_branch_and_allow_pushes", "protect_branch_and_allow_merges_access_levels", "protect_branch_and_allow_pushes_access_levels", "protect_branch_and_allowed_to_push", "protect_branch_and_allowed_to_merge", "protect_branch_and_allow_access_levels_with_user_ids", "protect_branch", ] for branch in branches: gitlab.create_branch(group_and_project, branch, "main") def fin(): for branch in branches: gitlab.delete_branch(group_and_project, branch) gitlab.set_file( group_and_project, "main", "README.md", DEFAULT_README, "Reset default content", ) request.addfinalizer(fin) @pytest.fixture(scope="function") def one_maintainer_and_two_developers(gitlab, group_and_project, users): gitlab.add_member_to_project( group_and_project, users[0], AccessLevel.MAINTAINER.value ) gitlab.add_member_to_project( group_and_project, users[1], AccessLevel.DEVELOPER.value ) gitlab.add_member_to_project( group_and_project, users[2], AccessLevel.DEVELOPER.value ) yield group_and_project # we try to remove all users, not just the 3 added above, # on purpose, as more may have been added in the tests for user in users: gitlab.remove_member_from_project(group_and_project, user) class TestBranches: def test__protect_branch_but_allow_all(self, gitlab, group_and_project, branches): protect_branch_but_allow_all = f""" projects_and_groups: {group_and_project}: branches: protect_branch_but_allow_all: protected: true developers_can_push: true developers_can_merge: true """ run_gitlabform(protect_branch_but_allow_all, group_and_project) branch = gitlab.get_branch(group_and_project, "protect_branch_but_allow_all") assert branch["protected"] is True assert branch["developers_can_push"] is True assert branch["developers_can_merge"] is True # @pytest.mark.skipif( # gl.has_no_license(), reason="this test requires a GitLab license (Paid/Trial)" # ) # def test__code_owners_approval(self, gitlab, group_and_project, branches): # group_and_project = group_and_project # # branch_access_levels = gitlab.get_branch_access_levels( # group_and_project, "protect_branch_but_allow_all" # ) # assert branch_access_levels["code_owner_approval_required"] is False # # protect_branch_with_code_owner_approval_required = f""" # projects_and_groups: # {group_and_project}: # branches: # protect_branch_with_code_owner_approval_required: # protected: true # developers_can_push: false # developers_can_merge: true # code_owner_approval_required: true # """ # # run_gitlabform( # protect_branch_with_code_owner_approval_required, group_and_project # ) # # branch_access_levels = gitlab.get_branch_access_levels( # group_and_project, "protect_branch_with_code_owner_approval_required" # ) # assert branch_access_levels["code_owner_approval_required"] is True def test__protect_branch_and_disallow_all( self, gitlab, group_and_project, branches ): protect_branch_and_disallow_all = f""" projects_and_groups: {group_and_project}: branches: protect_branch_and_disallow_all: protected: true developers_can_push: false developers_can_merge: false """ run_gitlabform(protect_branch_and_disallow_all, group_and_project) branch = 
gitlab.get_branch(group_and_project, "protect_branch_and_disallow_all") assert branch["protected"] is True assert branch["developers_can_push"] is False assert branch["developers_can_merge"] is False def test__mixed_config(self, gitlab, group_and_project, branches): mixed_config = f""" projects_and_groups: {group_and_project}: branches: protect_branch_and_allow_merges: protected: true developers_can_push: false developers_can_merge: true protect_branch_and_allow_pushes: protected: true developers_can_push: true developers_can_merge: false """ run_gitlabform(mixed_config, group_and_project) branch = gitlab.get_branch(group_and_project, "protect_branch_and_allow_merges") assert branch["protected"] is True assert branch["developers_can_push"] is False assert branch["developers_can_merge"] is True branch = gitlab.get_branch(group_and_project, "protect_branch_and_allow_pushes") assert branch["protected"] is True assert branch["developers_can_push"] is True assert branch["developers_can_merge"] is False unprotect_branches = f""" projects_and_groups: {group_and_project}: branches: protect_branch_and_allow_merges: protected: false protect_branch_and_allow_pushes: protected: false """ run_gitlabform(unprotect_branches, group_and_project) for branch in [ "protect_branch_and_allow_merges", "protect_branch_and_allow_pushes", ]: branch = gitlab.get_branch(group_and_project, branch) assert branch["protected"] is False def test__mixed_config_with_new_api( self, gitlab, group_and_project, branches, users, one_maintainer_and_two_developers, ): mixed_config_with_access_levels = f""" projects_and_groups: {group_and_project}: branches: protect_branch_and_allow_merges_access_levels: protected: true push_access_level: {AccessLevel.NO_ACCESS.value} merge_access_level: {AccessLevel.DEVELOPER.value} unprotect_access_level: {AccessLevel.MAINTAINER.value} '*_allow_pushes_access_levels': protected: true push_access_level: {AccessLevel.DEVELOPER.value} merge_access_level: {AccessLevel.DEVELOPER.value} unprotect_access_level: {AccessLevel.MAINTAINER.value} """ run_gitlabform(mixed_config_with_access_levels, group_and_project) ( push_access_levels, merge_access_levels, push_access_user_ids, merge_access_user_ids, unprotect_access_level, ) = gitlab.get_only_branch_access_levels( group_and_project, "protect_branch_and_allow_merges_access_levels" ) assert push_access_levels == [AccessLevel.NO_ACCESS.value] assert merge_access_levels == [AccessLevel.DEVELOPER.value] assert push_access_user_ids == [] assert merge_access_user_ids == [] assert unprotect_access_level is AccessLevel.MAINTAINER.value ( push_access_levels, merge_access_levels, push_access_user_ids, merge_access_user_ids, unprotect_access_level, ) = gitlab.get_only_branch_access_levels( group_and_project, "*_allow_pushes_access_levels" ) assert push_access_levels == [AccessLevel.DEVELOPER.value] assert merge_access_levels == [AccessLevel.DEVELOPER.value] assert push_access_user_ids == [] assert merge_access_user_ids == [] assert unprotect_access_level is AccessLevel.MAINTAINER.value mixed_config_with_access_levels_update = f""" projects_and_groups: {group_and_project}: branches: protect_branch_and_allow_merges_access_levels: protected: true push_access_level: {AccessLevel.NO_ACCESS.value} merge_access_level: {AccessLevel.MAINTAINER.value} unprotect_access_level: {AccessLevel.MAINTAINER.value} '*_allow_pushes_access_levels': protected: true push_access_level: {AccessLevel.MAINTAINER.value} merge_access_level: {AccessLevel.MAINTAINER.value} unprotect_access_level: 
{AccessLevel.MAINTAINER.value} """ run_gitlabform(mixed_config_with_access_levels_update, group_and_project) ( push_access_levels, merge_access_levels, push_access_user_ids, merge_access_user_ids, unprotect_access_level, ) = gitlab.get_only_branch_access_levels( group_and_project, "protect_branch_and_allow_merges_access_levels" ) assert push_access_levels == [AccessLevel.NO_ACCESS.value] assert merge_access_levels == [AccessLevel.MAINTAINER.value] assert push_access_user_ids == [] assert merge_access_user_ids == [] assert unprotect_access_level is AccessLevel.MAINTAINER.value ( push_access_levels, merge_access_levels, push_access_user_ids, merge_access_user_ids, unprotect_access_level, ) = gitlab.get_only_branch_access_levels( group_and_project, "*_allow_pushes_access_levels" ) assert push_access_levels == [AccessLevel.MAINTAINER.value] assert merge_access_levels == [AccessLevel.MAINTAINER.value] assert push_access_user_ids == [] assert merge_access_user_ids == [] assert unprotect_access_level is AccessLevel.MAINTAINER.value mixed_config_with_access_levels_unprotect_branches = f""" projects_and_groups: {group_and_project}: branches: protect_branch_and_allow_merges_access_levels: protected: false '*_allow_pushes_access_levels': protected: false """ run_gitlabform( mixed_config_with_access_levels_unprotect_branches, group_and_project ) for branch in [ "protect_branch_and_allow_merges_access_levels", "protect_branch_and_allow_pushes_access_levels", ]: branch = gitlab.get_branch(group_and_project, branch) assert branch["protected"] is False @pytest.mark.skipif( gl.has_no_license(), reason="this test requires a GitLab license (Paid/Trial)" ) def test__allow_user_ids( self, gitlab, group_and_project, branches, users, one_maintainer_and_two_developers, ): user_allowed_to_push_id = gitlab.get_user_to_protect_branch(users[0]) user_allowed_to_merge_id = gitlab.get_user_to_protect_branch(users[1]) user_allowed_to_push_and_allowed_to_merge_id = ( gitlab.get_user_to_protect_branch(users[2]) ) # testing allowed_to_push and allowed_to_merge for user support on protect branch (gitlab premium feature) mixed_config_with_allowed_to_push_and_merge = f""" projects_and_groups: {group_and_project}: branches: protect_branch_and_allowed_to_merge: protected: true allowed_to_push: - access_level: {AccessLevel.NO_ACCESS.value} allowed_to_merge: - access_level: {AccessLevel.DEVELOPER.value} - user_id: {user_allowed_to_merge_id} - user: {users[2]} unprotect_access_level: {AccessLevel.MAINTAINER.value} '*_and_allowed_to_push': protected: true allowed_to_push: - access_level: {AccessLevel.DEVELOPER.value} - user_id: {user_allowed_to_push_id} - user: {users[1]} allowed_to_merge: - access_level: {AccessLevel.MAINTAINER.value} unprotect_access_level: {AccessLevel.DEVELOPER.value} """ run_gitlabform(mixed_config_with_allowed_to_push_and_merge, group_and_project) ( push_access_levels, merge_access_levels, push_access_user_ids, merge_access_user_ids, unprotect_access_level, ) = gitlab.get_only_branch_access_levels( group_and_project, "protect_branch_and_allowed_to_merge" ) assert push_access_levels == [AccessLevel.NO_ACCESS.value] assert merge_access_levels == [AccessLevel.DEVELOPER.value] current_push_access_user_ids = [] current_push_access_user_ids.sort() assert push_access_user_ids == current_push_access_user_ids current_merge_access_user_ids = [ user_allowed_to_merge_id, user_allowed_to_push_and_allowed_to_merge_id, ] current_merge_access_user_ids.sort() assert merge_access_user_ids == current_merge_access_user_ids assert 
unprotect_access_level is AccessLevel.MAINTAINER.value ( push_access_levels, merge_access_levels, push_access_user_ids, merge_access_user_ids, unprotect_access_level, ) = gitlab.get_only_branch_access_levels( group_and_project, "*_and_allowed_to_push" ) assert push_access_levels == [AccessLevel.DEVELOPER.value] assert merge_access_levels == [AccessLevel.MAINTAINER.value] current_push_access_user_ids = [ user_allowed_to_push_id, user_allowed_to_merge_id, ] current_push_access_user_ids.sort() assert push_access_user_ids == current_push_access_user_ids current_merge_access_user_ids = [] current_merge_access_user_ids.sort() assert merge_access_user_ids == current_merge_access_user_ids assert unprotect_access_level is AccessLevel.DEVELOPER.value mixed_config_with_allowed_to_push_and_merge_update = f""" projects_and_groups: {group_and_project}: branches: protect_branch_and_allowed_to_merge: protected: true allowed_to_push: - access_level: {AccessLevel.NO_ACCESS.value} allowed_to_merge: - access_level: {AccessLevel.MAINTAINER.value} - user_id: {user_allowed_to_merge_id} unprotect_access_level: {AccessLevel.MAINTAINER.value} '*_and_allowed_to_push': protected: true allowed_to_push: - access_level: {AccessLevel.MAINTAINER.value} - user_id: {user_allowed_to_push_id} - user: {users[2]} - user: {users[1]} allowed_to_merge: - access_level: {AccessLevel.MAINTAINER.value} unprotect_access_level: {AccessLevel.MAINTAINER.value} """ run_gitlabform( mixed_config_with_allowed_to_push_and_merge_update, group_and_project ) ( push_access_levels, merge_access_levels, push_access_user_ids, merge_access_user_ids, unprotect_access_level, ) = gitlab.get_only_branch_access_levels( group_and_project, "protect_branch_and_allowed_to_merge" ) assert push_access_levels == [AccessLevel.NO_ACCESS.value] assert merge_access_levels == [AccessLevel.MAINTAINER.value] current_push_access_user_ids = [] current_push_access_user_ids.sort() assert push_access_user_ids == current_push_access_user_ids current_merge_access_user_ids = [user_allowed_to_merge_id] current_merge_access_user_ids.sort() assert merge_access_user_ids == current_merge_access_user_ids assert unprotect_access_level is AccessLevel.MAINTAINER.value ( push_access_levels, merge_access_levels, push_access_user_ids, merge_access_user_ids, unprotect_access_level, ) = gitlab.get_only_branch_access_levels( group_and_project, "*_and_allowed_to_push" ) assert push_access_levels == [AccessLevel.MAINTAINER.value] assert merge_access_levels == [AccessLevel.MAINTAINER.value] current_push_access_user_ids = [ user_allowed_to_push_id, user_allowed_to_merge_id, user_allowed_to_push_and_allowed_to_merge_id, ] current_push_access_user_ids.sort() assert push_access_user_ids == current_push_access_user_ids current_merge_access_user_ids = [] current_merge_access_user_ids.sort() assert merge_access_user_ids == current_merge_access_user_ids assert unprotect_access_level is AccessLevel.MAINTAINER.value mixed_config_with_allow_access_levels_with_user_ids = f""" projects_and_groups: {group_and_project}: branches: protect_branch_and_allow_access_levels_with_user_ids: protected: true push_access_level: {AccessLevel.DEVELOPER.value} merge_access_level: {AccessLevel.MAINTAINER.value} allowed_to_push: - access_level: {AccessLevel.MAINTAINER.value} - user_id: {user_allowed_to_push_id} - user: {users[2]} allowed_to_merge: - access_level: {AccessLevel.DEVELOPER.value} - user_id: {user_allowed_to_merge_id} - user: {users[0]} unprotect_access_level: {AccessLevel.MAINTAINER.value} """ run_gitlabform( 
mixed_config_with_allow_access_levels_with_user_ids, group_and_project ) ( push_access_levels, merge_access_levels, push_access_user_ids, merge_access_user_ids, unprotect_access_level, ) = gitlab.get_only_branch_access_levels( group_and_project, "protect_branch_and_allow_access_levels_with_user_ids", ) assert push_access_levels == [ AccessLevel.DEVELOPER.value, AccessLevel.MAINTAINER.value, ] assert merge_access_levels == [ AccessLevel.DEVELOPER.value, AccessLevel.MAINTAINER.value, ] current_push_access_user_ids = [ user_allowed_to_push_id, user_allowed_to_push_and_allowed_to_merge_id, ] current_push_access_user_ids.sort() assert push_access_user_ids == current_push_access_user_ids current_merge_access_user_ids = [ user_allowed_to_merge_id, user_allowed_to_push_id, ] current_merge_access_user_ids.sort() assert merge_access_user_ids == current_merge_access_user_ids assert unprotect_access_level is AccessLevel.MAINTAINER.value def test_protect_branch_with_old_api_next_update_with_new_api_and_unprotect( self, gitlab, group_and_project, branches ): config_protect_branch_with_old_api = f""" projects_and_groups: {group_and_project}: branches: protect_branch: protected: true developers_can_push: true developers_can_merge: true """ run_gitlabform(config_protect_branch_with_old_api, group_and_project) branch = gitlab.get_branch(group_and_project, "protect_branch") assert branch["protected"] is True assert branch["developers_can_push"] is True assert branch["developers_can_merge"] is True config_protect_branch_with_new_api = f""" projects_and_groups: {group_and_project}: branches: protect_branch: protected: true push_access_level: {AccessLevel.NO_ACCESS.value} merge_access_level: {AccessLevel.MAINTAINER.value} unprotect_access_level: {AccessLevel.MAINTAINER.value} """ run_gitlabform(config_protect_branch_with_new_api, group_and_project) ( push_access_levels, merge_access_levels, push_access_user_ids, merge_access_user_ids, unprotect_access_level, ) = gitlab.get_only_branch_access_levels(group_and_project, "protect_branch") assert push_access_levels == [AccessLevel.NO_ACCESS.value] assert merge_access_levels == [AccessLevel.MAINTAINER.value] assert push_access_user_ids == [] assert merge_access_user_ids == [] assert unprotect_access_level is AccessLevel.MAINTAINER.value config_protect_branch_unprotect = f""" projects_and_groups: {group_and_project}: branches: protect_branch: protected: false """ run_gitlabform(config_protect_branch_unprotect, group_and_project) branch = gitlab.get_branch(group_and_project, "protect_branch") assert branch["protected"] is False def test_protect_branch_with_new_api_next_update_with_old_api_and_unprotect( self, gitlab, group_and_project, branches ): config_protect_branch_with_new_api = f""" projects_and_groups: {group_and_project}: branches: protect_branch: protected: true push_access_level: {AccessLevel.NO_ACCESS.value} merge_access_level: {AccessLevel.MAINTAINER.value} unprotect_access_level: {AccessLevel.MAINTAINER.value} """ run_gitlabform(config_protect_branch_with_new_api, group_and_project) ( push_access_levels, merge_access_levels, push_access_user_ids, merge_access_user_ids, unprotect_access_level, ) = gitlab.get_only_branch_access_levels(group_and_project, "protect_branch") assert push_access_levels == [AccessLevel.NO_ACCESS.value] assert merge_access_levels == [AccessLevel.MAINTAINER.value] assert push_access_user_ids == [] assert merge_access_user_ids == [] assert unprotect_access_level is AccessLevel.MAINTAINER.value config_protect_branch_with_old_api = f""" 
projects_and_groups: {group_and_project}: branches: protect_branch: protected: true developers_can_push: true developers_can_merge: true """ run_gitlabform(config_protect_branch_with_old_api, group_and_project) branch = gitlab.get_branch(group_and_project, "protect_branch") assert branch["protected"] is True assert branch["developers_can_push"] is True assert branch["developers_can_merge"] is True config_protect_branch_unprotect = f""" projects_and_groups: {group_and_project}: branches: protect_branch: protected: false """ run_gitlabform(config_protect_branch_unprotect, group_and_project) branch = gitlab.get_branch(group_and_project, "protect_branch") assert branch["protected"] is False @pytest.mark.skipif( gl.has_no_license(), reason="this test requires a GitLab license (Paid/Trial)" ) def test_protect_branch_with_old_api_next_update_with_new_api_and_userid_and_unprotect( self, gitlab, group_and_project, branches, users, one_maintainer_and_two_developers, ): config_protect_branch_with_old_api = f""" projects_and_groups: {group_and_project}: branches: protect_branch: protected: true developers_can_push: true developers_can_merge: true """ run_gitlabform(config_protect_branch_with_old_api, group_and_project) <|fim▁hole|> assert branch["developers_can_push"] is True assert branch["developers_can_merge"] is True user_allowed_to_push_id = gitlab.get_user_to_protect_branch(users[0]) user_allowed_to_merge_id = gitlab.get_user_to_protect_branch(users[1]) config_protect_branch_with_new_api = f""" projects_and_groups: {group_and_project}: branches: protect_branch: protected: true push_access_level: {AccessLevel.DEVELOPER.value} merge_access_level: {AccessLevel.MAINTAINER.value} allowed_to_push: - access_level: {AccessLevel.MAINTAINER.value} - user_id: {user_allowed_to_push_id} - user: {users[1]} allowed_to_merge: - access_level: {AccessLevel.MAINTAINER.value} - user_id: {user_allowed_to_merge_id} unprotect_access_level: {AccessLevel.MAINTAINER.value} """ run_gitlabform(config_protect_branch_with_new_api, group_and_project) ( push_access_levels, merge_access_levels, push_access_user_ids, merge_access_user_ids, unprotect_access_level, ) = gitlab.get_only_branch_access_levels(group_and_project, "protect_branch") assert push_access_levels == [ AccessLevel.DEVELOPER.value, AccessLevel.MAINTAINER.value, ] assert merge_access_levels == [AccessLevel.MAINTAINER.value] current_push_access_user_ids = [ user_allowed_to_push_id, user_allowed_to_merge_id, ] current_push_access_user_ids.sort() assert push_access_user_ids == current_push_access_user_ids current_merge_access_user_ids = [user_allowed_to_merge_id] current_merge_access_user_ids.sort() assert merge_access_user_ids == current_merge_access_user_ids assert unprotect_access_level is AccessLevel.MAINTAINER.value config_protect_branch_unprotect = f""" projects_and_groups: {group_and_project}: branches: protect_branch: protected: false """ run_gitlabform(config_protect_branch_unprotect, group_and_project) branch = gitlab.get_branch(group_and_project, "protect_branch") assert branch["protected"] is False def test_unprotect_when_the_rest_of_the_parameters_are_still_specified_old_api( self, gitlab, group_and_project, branches ): config_protect_branch_with_old_api = f""" projects_and_groups: {group_and_project}: branches: protect_branch: protected: true developers_can_push: true developers_can_merge: true """ run_gitlabform(config_protect_branch_with_old_api, group_and_project) branch = gitlab.get_branch(group_and_project, "protect_branch") assert 
branch["protected"] is True assert branch["developers_can_push"] is True assert branch["developers_can_merge"] is True config_unprotect_branch_with_old_api = f""" gitlab: api_version: 4 projects_and_groups: {group_and_project}: branches: protect_branch: protected: false developers_can_push: true developers_can_merge: true """ run_gitlabform(config_unprotect_branch_with_old_api, group_and_project) branch = gitlab.get_branch(group_and_project, "protect_branch") assert branch["protected"] is False def test_unprotect_when_the_rest_of_the_parameters_are_still_specified_new_api( self, gitlab, group_and_project, branches ): config_protect_branch_with_new_api = f""" projects_and_groups: {group_and_project}: branches: protect_branch: protected: true push_access_level: {AccessLevel.NO_ACCESS.value} merge_access_level: {AccessLevel.MAINTAINER.value} unprotect_access_level: {AccessLevel.MAINTAINER.value} """ run_gitlabform(config_protect_branch_with_new_api, group_and_project) ( push_access_levels, merge_access_levels, push_access_user_ids, merge_access_user_ids, unprotect_access_level, ) = gitlab.get_only_branch_access_levels(group_and_project, "protect_branch") assert push_access_levels == [AccessLevel.NO_ACCESS.value] assert merge_access_levels == [AccessLevel.MAINTAINER.value] assert push_access_user_ids == [] assert merge_access_user_ids == [] assert unprotect_access_level is AccessLevel.MAINTAINER.value config_unprotect_branch_with_new_api = f""" projects_and_groups: {group_and_project}: branches: protect_branch: protected: false push_access_level: {AccessLevel.NO_ACCESS.value} merge_access_level: {AccessLevel.MAINTAINER.value} unprotect_access_level: {AccessLevel.MAINTAINER.value} """ run_gitlabform(config_unprotect_branch_with_new_api, group_and_project) # old API branch = gitlab.get_branch(group_and_project, "protect_branch") assert branch["protected"] is False # new API ( push_access_levels, merge_access_levels, push_access_user_ids, merge_access_user_ids, unprotect_access_level, ) = gitlab.get_only_branch_access_levels(group_and_project, "protect_branch") assert push_access_levels is None assert merge_access_levels is None assert push_access_user_ids is None assert merge_access_user_ids is None assert unprotect_access_level is None def test__config_with_access_level_names(self, gitlab, group_and_project, branches): config_with_access_levels_names = f""" projects_and_groups: {group_and_project}: branches: protect_branch_and_allow_merges_access_levels: protected: true push_access_level: no_access # note "_" or " " and the various merge_access_level: Developer # case in each line. it should not unprotect_access_level: MAINTAINER # matter as we allow any case. """ run_gitlabform(config_with_access_levels_names, group_and_project) ( push_access_level, merge_access_level, push_access_user_ids, merge_access_user_ids, unprotect_access_level, ) = gitlab.get_only_branch_access_levels( group_and_project, "protect_branch_and_allow_merges_access_levels" ) assert push_access_level == [AccessLevel.NO_ACCESS.value] assert merge_access_level == [AccessLevel.DEVELOPER.value] assert push_access_user_ids == [] assert merge_access_user_ids == [] assert unprotect_access_level is AccessLevel.MAINTAINER.value<|fim▁end|>
branch = gitlab.get_branch(group_and_project, "protect_branch") assert branch["protected"] is True
<|file_name|>_random_over_sampler.py<|end_file_name|><|fim▁begin|>"""Class to perform random over-sampling.""" # Authors: Guillaume Lemaitre <[email protected]> # Christos Aridas # License: MIT from collections.abc import Mapping from numbers import Real import numpy as np from scipy import sparse from sklearn.utils import check_array, check_random_state from sklearn.utils import _safe_indexing from sklearn.utils.sparsefuncs import mean_variance_axis from .base import BaseOverSampler from ..utils import check_target_type from ..utils import Substitution from ..utils._docstring import _random_state_docstring from ..utils._validation import _deprecate_positional_args @Substitution( sampling_strategy=BaseOverSampler._sampling_strategy_docstring, random_state=_random_state_docstring, ) class RandomOverSampler(BaseOverSampler): """Class to perform random over-sampling. Object to over-sample the minority class(es) by picking samples at random with replacement. The bootstrap can be generated in a smoothed manner. Read more in the :ref:`User Guide <random_over_sampler>`. Parameters ---------- {sampling_strategy} {random_state} shrinkage : float or dict, default=None Parameter controlling the shrinkage applied to the covariance matrix. when a smoothed bootstrap is generated. The options are: - if `None`, a normal bootstrap will be generated without perturbation. It is equivalent to `shrinkage=0` as well; - if a `float` is given, the shrinkage factor will be used for all classes to generate the smoothed bootstrap; - if a `dict` is given, the shrinkage factor will specific for each class. The key correspond to the targeted class and the value is the shrinkage factor. The value needs of the shrinkage parameter needs to be higher or equal to 0. .. versionadded:: 0.8 Attributes ---------- sampling_strategy_ : dict Dictionary containing the information to sample the dataset. The keys corresponds to the class labels from which to sample and the values are the number of samples to sample. sample_indices_ : ndarray of shape (n_new_samples,) Indices of the samples selected. .. versionadded:: 0.4 shrinkage_ : dict or None The per-class shrinkage factor used to generate the smoothed bootstrap sample. When `shrinkage=None` a normal bootstrap will be generated. .. versionadded:: 0.8 n_features_in_ : int Number of features in the input dataset. .. versionadded:: 0.9 See Also -------- BorderlineSMOTE : Over-sample using the borderline-SMOTE variant. SMOTE : Over-sample using SMOTE. SMOTENC : Over-sample using SMOTE for continuous and categorical features. SMOTEN : Over-sample using the SMOTE variant specifically for categorical features only. SVMSMOTE : Over-sample using SVM-SMOTE variant. ADASYN : Over-sample using ADASYN. KMeansSMOTE : Over-sample applying a clustering before to oversample using SMOTE. Notes ----- Supports multi-class resampling by sampling each class independently. Supports heterogeneous data as object array containing string and numeric data. When generating a smoothed bootstrap, this method is also known as Random Over-Sampling Examples (ROSE) [1]_. .. warning:: Since smoothed bootstrap are generated by adding a small perturbation to the drawn samples, this method is not adequate when working with sparse matrices. References ---------- .. [1] G Menardi, N. Torelli, "Training and assessing classification rules with imbalanced data," Data Mining and Knowledge Discovery, 28(1), pp.92-122, 2014. 
Examples -------- >>> from collections import Counter >>> from sklearn.datasets import make_classification >>> from imblearn.over_sampling import \ RandomOverSampler # doctest: +NORMALIZE_WHITESPACE >>> X, y = make_classification(n_classes=2, class_sep=2, ... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0, ... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10) >>> print('Original dataset shape %s' % Counter(y)) Original dataset shape Counter({{1: 900, 0: 100}}) >>> ros = RandomOverSampler(random_state=42) >>> X_res, y_res = ros.fit_resample(X, y) >>> print('Resampled dataset shape %s' % Counter(y_res)) Resampled dataset shape Counter({{0: 900, 1: 900}}) """ @_deprecate_positional_args def __init__( self, *, sampling_strategy="auto", random_state=None, shrinkage=None, ): super().__init__(sampling_strategy=sampling_strategy) self.random_state = random_state self.shrinkage = shrinkage def _check_X_y(self, X, y): y, binarize_y = check_target_type(y, indicate_one_vs_all=True) X, y = self._validate_data( X, y, reset=True, accept_sparse=["csr", "csc"], dtype=None, force_all_finite=False, ) return X, y, binarize_y def _fit_resample(self, X, y): random_state = check_random_state(self.random_state) if isinstance(self.shrinkage, Real): self.shrinkage_ = { klass: self.shrinkage for klass in self.sampling_strategy_ } elif self.shrinkage is None or isinstance(self.shrinkage, Mapping): self.shrinkage_ = self.shrinkage else: raise ValueError( f"`shrinkage` should either be a positive floating number or " f"a dictionary mapping a class to a positive floating number. " f"Got {repr(self.shrinkage)} instead." ) if self.shrinkage_ is not None: missing_shrinkage_keys = ( self.sampling_strategy_.keys() - self.shrinkage_.keys() ) if missing_shrinkage_keys: raise ValueError( f"`shrinkage` should contain a shrinkage factor for " f"each class that will be resampled. The missing "<|fim▁hole|> for klass, shrink_factor in self.shrinkage_.items(): if shrink_factor < 0: raise ValueError( f"The shrinkage factor needs to be >= 0. " f"Got {shrink_factor} for class {klass}." ) # smoothed bootstrap imposes to make numerical operation; we need # to be sure to have only numerical data in X try: X = check_array(X, accept_sparse=["csr", "csc"], dtype="numeric") except ValueError as exc: raise ValueError( "When shrinkage is not None, X needs to contain only " "numerical data to later generate a smoothed bootstrap " "sample." 
) from exc X_resampled = [X.copy()] y_resampled = [y.copy()] sample_indices = range(X.shape[0]) for class_sample, num_samples in self.sampling_strategy_.items(): target_class_indices = np.flatnonzero(y == class_sample) bootstrap_indices = random_state.choice( target_class_indices, size=num_samples, replace=True, ) sample_indices = np.append(sample_indices, bootstrap_indices) if self.shrinkage_ is not None: # generate a smoothed bootstrap with a perturbation n_samples, n_features = X.shape smoothing_constant = (4 / ((n_features + 2) * n_samples)) ** ( 1 / (n_features + 4) ) if sparse.issparse(X): _, X_class_variance = mean_variance_axis( X[target_class_indices, :], axis=0, ) X_class_scale = np.sqrt(X_class_variance, out=X_class_variance) else: X_class_scale = np.std(X[target_class_indices, :], axis=0) smoothing_matrix = np.diagflat( self.shrinkage_[class_sample] * smoothing_constant * X_class_scale ) X_new = random_state.randn(num_samples, n_features) X_new = X_new.dot(smoothing_matrix) + X[bootstrap_indices, :] if sparse.issparse(X): X_new = sparse.csr_matrix(X_new, dtype=X.dtype) X_resampled.append(X_new) else: # generate a bootstrap X_resampled.append(_safe_indexing(X, bootstrap_indices)) y_resampled.append(_safe_indexing(y, bootstrap_indices)) self.sample_indices_ = np.array(sample_indices) if sparse.issparse(X): X_resampled = sparse.vstack(X_resampled, format=X.format) else: X_resampled = np.vstack(X_resampled) y_resampled = np.hstack(y_resampled) return X_resampled, y_resampled def _more_tags(self): return { "X_types": ["2darray", "string", "sparse", "dataframe"], "sample_indices": True, "allow_nan": True, }<|fim▁end|>
f"classes are: {repr(missing_shrinkage_keys)}" )
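# With a non-zero `shrinkage`, RandomOverSampler above draws a smoothed
# bootstrap (ROSE): each new row is a bootstrap row plus Gaussian noise scaled
# by shrinkage * smoothing_constant * per-feature standard deviation. The
# standalone sketch below repeats that arithmetic outside the class so the
# effect of the shrinkage factor is easier to see; it assumes only numpy, and
# the variable names are illustrative.

import numpy as np

rng = np.random.RandomState(0)
X_minority = rng.normal(loc=0.0, scale=2.0, size=(20, 3))   # minority-class rows
num_samples, shrinkage = 5, 1.0

n_samples, n_features = X_minority.shape
# Same rule-of-thumb constant used in _fit_resample above.
smoothing_constant = (4 / ((n_features + 2) * n_samples)) ** (1 / (n_features + 4))
scale = np.std(X_minority, axis=0)

bootstrap_idx = rng.choice(n_samples, size=num_samples, replace=True)
noise = rng.randn(num_samples, n_features)
X_new = X_minority[bootstrap_idx] + noise * (shrinkage * smoothing_constant * scale)

print(X_new.shape)           # (5, 3)
print(smoothing_constant)    # shrinkage=0 would turn X_new into a plain bootstrap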
<|file_name|>traces.py<|end_file_name|><|fim▁begin|># Copyright 2020 The StackStorm Authors. # Copyright 2019 Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from st2api.controllers.resource import ResourceController from st2common.models.api.trace import TraceAPI from st2common.persistence.trace import Trace from st2common.rbac.types import PermissionType __all__ = ["TracesController"] class TracesController(ResourceController): model = TraceAPI access = Trace supported_filters = { "trace_tag": "trace_tag", "execution": "action_executions.object_id", "rule": "rules.object_id", "trigger_instance": "trigger_instances.object_id", } query_options = {"sort": ["-start_timestamp", "trace_tag"]} def get_all( self,<|fim▁hole|> sort=None, offset=0, limit=None, requester_user=None, **raw_filters, ): # Use a custom sort order when filtering on a timestamp so we return a correct result as # expected by the user query_options = None if "sort_desc" in raw_filters and raw_filters["sort_desc"] == "True": query_options = {"sort": ["-start_timestamp", "trace_tag"]} elif "sort_asc" in raw_filters and raw_filters["sort_asc"] == "True": query_options = {"sort": ["+start_timestamp", "trace_tag"]} return self._get_all( exclude_fields=exclude_attributes, include_fields=include_attributes, sort=sort, offset=offset, limit=limit, query_options=query_options, raw_filters=raw_filters, requester_user=requester_user, ) def get_one(self, id, requester_user): return self._get_one_by_id( id, requester_user=requester_user, permission_type=PermissionType.TRACE_VIEW ) traces_controller = TracesController()<|fim▁end|>
exclude_attributes=None, include_attributes=None,
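# TracesController.get_all above picks its sort order from the raw query
# filters (?sort_desc=True or ?sort_asc=True) and otherwise falls back to the
# class-level default of newest-first. A small standalone version of that
# decision is shown below; the function name is illustrative.

DEFAULT_QUERY_OPTIONS = {"sort": ["-start_timestamp", "trace_tag"]}

def resolve_query_options(raw_filters):
    # Mirrors the sort_desc / sort_asc handling in get_all above.
    if raw_filters.get("sort_desc") == "True":
        return {"sort": ["-start_timestamp", "trace_tag"]}
    if raw_filters.get("sort_asc") == "True":
        return {"sort": ["+start_timestamp", "trace_tag"]}
    return DEFAULT_QUERY_OPTIONS

print(resolve_query_options({"sort_asc": "True"}))
print(resolve_query_options({"trace_tag": "deploy-42"}))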
<|file_name|>pio_uart_tx.py<|end_file_name|><|fim▁begin|># Example using PIO to create a UART TX interface from machine import Pin from rp2 import PIO, StateMachine, asm_pio UART_BAUD = 115200 PIN_BASE = 10 NUM_UARTS = 8 @asm_pio(sideset_init=PIO.OUT_HIGH, out_init=PIO.OUT_HIGH, out_shiftdir=PIO.SHIFT_RIGHT) def uart_tx(): # fmt: off # Block with TX deasserted until data available pull() # Initialise bit counter, assert start bit for 8 cycles set(x, 7) .side(0) [7] # Shift out 8 data bits, 8 execution cycles per bit label("bitloop") out(pins, 1) [6] jmp(x_dec, "bitloop") # Assert stop bit for 8 cycles total (incl 1 for pull()) nop() .side(1) [6] # fmt: on # Now we add 8 UART TXs, on pins 10 to 17. Use the same baud rate for all of them. uarts = [] for i in range(NUM_UARTS): sm = StateMachine( i, uart_tx, freq=8 * UART_BAUD, sideset_base=Pin(PIN_BASE + i), out_base=Pin(PIN_BASE + i) ) sm.active(1) uarts.append(sm) # We can print characters from each UART by pushing them to the TX FIFO def pio_uart_print(sm, s):<|fim▁hole|> # Print a different message from each UART for i, u in enumerate(uarts): pio_uart_print(u, "Hello from UART {}!\n".format(i))<|fim▁end|>
for c in s: sm.put(ord(c))
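# The PIO program above clocks the state machine at 8x the baud rate, so every
# instruction-plus-delay group is padded to 8 state-machine cycles per UART bit
# (one start bit, 8 data bits, one stop bit per frame). The plain arithmetic
# below checks that framing and the resulting byte rate; it is an illustrative
# calculation, not MicroPython/rp2 code.

UART_BAUD = 115200
SM_FREQ = 8 * UART_BAUD                    # freq passed to StateMachine above

cycles_per_bit = SM_FREQ // UART_BAUD      # 8 state-machine cycles per UART bit
bits_per_frame = 1 + 8 + 1                 # start + data + stop (8N1 framing)
frames_per_second = UART_BAUD / bits_per_frame

print(cycles_per_bit)              # 8
print(round(frames_per_second))    # ~11520 bytes per second at 115200 baud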
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>use std::fmt::Debug; use std::fmt::Binary; pub fn println<T>(a: T) where<|fim▁hole|> T: Debug, { print!("{:?}\n", a); } pub fn println_binary<T>(a: T) where T: Binary, { print!("{:#b}\n", a); }<|fim▁end|>
<|file_name|>maven_artifact.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2014, Chris Schmidt <chris.schmidt () contrastsecurity.com> # # Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact # as a reference and starting point. # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: maven_artifact short_description: Downloads an Artifact from a Maven Repository version_added: "2.0" description: - Downloads an artifact from a maven repository given the maven coordinates provided to the module. - Can retrieve snapshots or release versions of the artifact and will resolve the latest available version if one is not available. author: "Chris Schmidt (@chrisisbeef)" requirements: - "python >= 2.6" - lxml - boto if using a S3 repository (s3://...) options: group_id: description: - The Maven groupId coordinate required: true artifact_id: description: - The maven artifactId coordinate required: true version: description: - The maven version coordinate required: false default: latest classifier: description: - The maven classifier coordinate required: false default: null extension: description: - The maven type/extension coordinate required: false default: jar repository_url: description: - The URL of the Maven Repository to download from. - Use s3://... if the repository is hosted on Amazon S3, added in version 2.2. required: false default: http://repo1.maven.org/maven2 username: description: - The username to authenticate as to the Maven Repository. Use AWS secret key of the repository is hosted on S3 required: false default: null aliases: [ "aws_secret_key" ] password: description: - The password to authenticate with to the Maven Repository. Use AWS secret access key of the repository is hosted on S3 required: false default: null aliases: [ "aws_secret_access_key" ] dest: description: - The path where the artifact should be written to - If file mode or ownerships are specified and destination path already exists, they affect the downloaded file required: true default: false state: description: - The desired state of the artifact required: true default: present choices: [present,absent] timeout: description: - Specifies a timeout in seconds for the connection attempt required: false default: 10 version_added: "2.3" validate_certs: description: - If C(no), SSL certificates will not be validated. This should only be set to C(no) when no other option exists. required: false default: 'yes' choices: ['yes', 'no'] version_added: "1.9.3" keep_name: description: - If C(yes), the downloaded artifact's name is preserved, i.e the version number remains part of it. - This option only has effect when C(dest) is a directory and C(version) is set to C(latest). 
required: false default: 'no' choices: ['yes', 'no'] version_added: "2.4" extends_documentation_fragment: - files ''' EXAMPLES = ''' # Download the latest version of the JUnit framework artifact from Maven Central - maven_artifact: group_id: junit artifact_id: junit dest: /tmp/junit-latest.jar # Download JUnit 4.11 from Maven Central - maven_artifact: group_id: junit artifact_id: junit version: 4.11 dest: /tmp/junit-4.11.jar # Download an artifact from a private repository requiring authentication - maven_artifact: group_id: com.company artifact_id: library-name repository_url: 'https://repo.company.com/maven' username: user password: pass dest: /tmp/library-name-latest.jar # Download a WAR File to the Tomcat webapps directory to be deployed - maven_artifact: group_id: com.company artifact_id: web-app extension: war repository_url: 'https://repo.company.com/maven' dest: /var/lib/tomcat7/webapps/web-app.war # Keep a downloaded artifact's name, i.e. retain the version - maven_artifact: version: latest artifact_id: spring-core group_id: org.springframework dest: /tmp/ keep_name: yes ''' import hashlib import os import posixpath import sys from lxml import etree try: import boto3 HAS_BOTO = True except ImportError: HAS_BOTO = False from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves.urllib.parse import urlparse from ansible.module_utils.urls import fetch_url from ansible.module_utils._text import to_bytes def split_pre_existing_dir(dirname): ''' Return the first pre-existing directory and a list of the new directories that will be created. ''' head, tail = os.path.split(dirname) b_head = to_bytes(head, errors='surrogate_or_strict') if not os.path.exists(b_head): (pre_existing_dir, new_directory_list) = split_pre_existing_dir(head) else: return head, [tail] new_directory_list.append(tail) return pre_existing_dir, new_directory_list def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed): ''' Walk the new directories list and make sure that permissions are as we would expect ''' if new_directory_list: working_dir = os.path.join(pre_existing_dir, new_directory_list.pop(0)) directory_args['path'] = working_dir changed = module.set_fs_attributes_if_different(directory_args, changed) changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed) return changed class Artifact(object): def __init__(self, group_id, artifact_id, version, classifier='', extension='jar'): if not group_id: raise ValueError("group_id must be set") if not artifact_id: raise ValueError("artifact_id must be set") self.group_id = group_id self.artifact_id = artifact_id self.version = version self.classifier = classifier if not extension: self.extension = "jar" else: self.extension = extension def is_snapshot(self): return self.version and self.version.endswith("SNAPSHOT") def path(self, with_version=True): base = posixpath.join(self.group_id.replace(".", "/"), self.artifact_id) if with_version and self.version: base = posixpath.join(base, self.version) return base def _generate_filename(self): filename = self.artifact_id + "-" + self.classifier + "." + self.extension if not self.classifier: filename = self.artifact_id + "." 
+ self.extension return filename def get_filename(self, filename=None): if not filename: filename = self._generate_filename() elif os.path.isdir(filename): filename = os.path.join(filename, self._generate_filename()) return filename def __str__(self): result = "%s:%s:%s" % (self.group_id, self.artifact_id, self.version) if self.classifier: result = "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version) elif self.extension != "jar": result = "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version) return result @staticmethod def parse(input): parts = input.split(":") if len(parts) >= 3: g = parts[0] a = parts[1] v = parts[len(parts) - 1] t = None c = None if len(parts) == 4: t = parts[2] if len(parts) == 5: t = parts[2] c = parts[3] return Artifact(g, a, v, c, t) else: return None class MavenDownloader: def __init__(self, module, base="http://repo1.maven.org/maven2"): self.module = module if base.endswith("/"): base = base.rstrip("/") self.base = base self.user_agent = "Maven Artifact Downloader/1.0" self.latest_version_found = None def find_latest_version_available(self, artifact): if self.latest_version_found: return self.latest_version_found path = "/%s/maven-metadata.xml" % (artifact.path(False)) xml = self._request(self.base + path, "Failed to download maven-metadata.xml", etree.parse) v = xml.xpath("/metadata/versioning/versions/version[last()]/text()") if v: self.latest_version_found = v[0] return v[0] def find_uri_for_artifact(self, artifact): if artifact.version == "latest": artifact.version = self.find_latest_version_available(artifact) if artifact.is_snapshot(): path = "/%s/maven-metadata.xml" % (artifact.path()) xml = self._request(self.base + path, "Failed to download maven-metadata.xml", etree.parse) timestamp = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")[0] buildNumber = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0] for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"): classifier = snapshotArtifact.xpath("classifier/text()") artifact_classifier = classifier[0] if classifier else '' extension = snapshotArtifact.xpath("extension/text()") artifact_extension = extension[0] if extension else '' if artifact_classifier == artifact.classifier and artifact_extension == artifact.extension: return self._uri_for_artifact(artifact, snapshotArtifact.xpath("value/text()")[0]) return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + buildNumber)) return self._uri_for_artifact(artifact, artifact.version) def _uri_for_artifact(self, artifact, version=None): if artifact.is_snapshot() and not version: raise ValueError("Expected uniqueversion for snapshot artifact " + str(artifact)) elif not artifact.is_snapshot(): version = artifact.version if artifact.classifier: return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension) return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "." 
+ artifact.extension) def _request(self, url, failmsg, f): url_to_use = url parsed_url = urlparse(url) if parsed_url.scheme=='s3': parsed_url = urlparse(url) bucket_name = parsed_url.netloc key_name = parsed_url.path[1:] client = boto3.client('s3',aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', '')) url_to_use = client.generate_presigned_url('get_object',Params={'Bucket':bucket_name,'Key':key_name},ExpiresIn=10) req_timeout = self.module.params.get('timeout') # Hack to add parameters in the way that fetch_url expects self.module.params['url_username'] = self.module.params.get('username', '') self.module.params['url_password'] = self.module.params.get('password', '') self.module.params['http_agent'] = self.module.params.get('user_agent', None) response, info = fetch_url(self.module, url_to_use, timeout=req_timeout) if info['status'] != 200: raise ValueError(failmsg + " because of " + info['msg'] + "for URL " + url_to_use) else: return f(response) def download(self, artifact, filename=None): filename = artifact.get_filename(filename) if not artifact.version or artifact.version == "latest": artifact = Artifact(artifact.group_id, artifact.artifact_id, self.find_latest_version_available(artifact), artifact.classifier, artifact.extension) url = self.find_uri_for_artifact(artifact) result = True if not self.verify_md5(filename, url + ".md5"): response = self._request(url, "Failed to download artifact " + str(artifact), lambda r: r) if response: f = open(filename, 'w') # f.write(response.read()) self._write_chunks(response, f, report_hook=self.chunk_report) f.close() else: result = False return result def chunk_report(self, bytes_so_far, chunk_size, total_size): percent = float(bytes_so_far) / total_size percent = round(percent * 100, 2) sys.stdout.write("Downloaded %d of %d bytes (%0.2f%%)\r" % (bytes_so_far, total_size, percent)) if bytes_so_far >= total_size: sys.stdout.write('\n') def _write_chunks(self, response, file, chunk_size=8192, report_hook=None): total_size = response.info().getheader('Content-Length').strip() total_size = int(total_size) bytes_so_far = 0 while 1: chunk = response.read(chunk_size) bytes_so_far += len(chunk) if not chunk: break file.write(chunk) if report_hook: report_hook(bytes_so_far, chunk_size, total_size) return bytes_so_far<|fim▁hole|> def verify_md5(self, file, remote_md5): result = False if os.path.exists(file): local_md5 = self._local_md5(file) remote = self._request(remote_md5, "Failed to download MD5", lambda r: r.read()) result = local_md5 == remote return result def _local_md5(self, file): md5 = hashlib.md5() f = open(file, 'rb') for chunk in iter(lambda: f.read(8192), ''): md5.update(chunk) f.close() return md5.hexdigest() def main(): module = AnsibleModule( argument_spec = dict( group_id = dict(default=None), artifact_id = dict(default=None), version = dict(default="latest"), classifier = dict(default=''), extension = dict(default='jar'), repository_url = dict(default=None), username = dict(default=None,aliases=['aws_secret_key']), password = dict(default=None, no_log=True,aliases=['aws_secret_access_key']), state = dict(default="present", choices=["present","absent"]), # TODO - Implement a "latest" state timeout = dict(default=10, type='int'), dest = dict(type="path", default=None), validate_certs = dict(required=False, default=True, type='bool'), keep_name = dict(required=False, default=False, type='bool'), ), add_file_common_args=True ) repository_url = module.params["repository_url"] 
if not repository_url: repository_url = "http://repo1.maven.org/maven2" try: parsed_url = urlparse(repository_url) except AttributeError as e: module.fail_json(msg='url parsing went wrong %s' % e) if parsed_url.scheme=='s3' and not HAS_BOTO: module.fail_json(msg='boto3 required for this module, when using s3:// repository URLs') group_id = module.params["group_id"] artifact_id = module.params["artifact_id"] version = module.params["version"] classifier = module.params["classifier"] extension = module.params["extension"] state = module.params["state"] dest = module.params["dest"] b_dest = to_bytes(dest, errors='surrogate_or_strict') keep_name = module.params["keep_name"] #downloader = MavenDownloader(module, repository_url, repository_username, repository_password) downloader = MavenDownloader(module, repository_url) try: artifact = Artifact(group_id, artifact_id, version, classifier, extension) except ValueError as e: module.fail_json(msg=e.args[0]) changed = False prev_state = "absent" if dest.endswith(os.sep): b_dest = to_bytes(dest, errors='surrogate_or_strict') if not os.path.exists(b_dest): (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dest) os.makedirs(b_dest) directory_args = module.load_file_common_arguments(module.params) directory_mode = module.params["directory_mode"] if directory_mode is not None: directory_args['mode'] = directory_mode else: directory_args['mode'] = None changed = adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed) if os.path.isdir(b_dest): version_part = version if keep_name and version == 'latest': version_part = downloader.find_latest_version_available(artifact) if classifier: dest = posixpath.join(dest, "%s-%s-%s.%s" % (artifact_id, version_part, classifier, extension)) else: dest = posixpath.join(dest, "%s-%s.%s" % (artifact_id, version_part, extension)) b_dest = to_bytes(dest, errors='surrogate_or_strict') if os.path.lexists(b_dest) and downloader.verify_md5(dest, downloader.find_uri_for_artifact(artifact) + '.md5'): prev_state = "present" if prev_state == "absent": try: if downloader.download(artifact, b_dest): changed = True else: module.fail_json(msg="Unable to download the artifact") except ValueError as e: module.fail_json(msg=e.args[0]) module.params['dest'] = dest file_args = module.load_file_common_arguments(module.params) changed = module.set_fs_attributes_if_different(file_args, changed) if changed: module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, extension=extension, repository_url=repository_url, changed=changed) else: module.exit_json(state=state, dest=dest, changed=changed) if __name__ == '__main__': main()<|fim▁end|>
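The downloader in the row above derives artifact URLs from Maven coordinates. A minimal standalone sketch of that path building (plain Python, release artifacts only; the coordinates used here are just an example, not taken from the row):

import posixpath

def artifact_path(group_id, artifact_id, version=None):
    # Mirrors Artifact.path(): dots in the groupId become directory separators.
    base = posixpath.join(group_id.replace(".", "/"), artifact_id)
    if version:
        base = posixpath.join(base, version)
    return base

def release_uri(repo_base, group_id, artifact_id, version, extension="jar", classifier=None):
    # Rough equivalent of MavenDownloader._uri_for_artifact() for non-snapshot versions.
    name = artifact_id + "-" + version
    if classifier:
        name += "-" + classifier
    return posixpath.join(repo_base, artifact_path(group_id, artifact_id, version),
                          name + "." + extension)

print(release_uri("http://repo1.maven.org/maven2", "junit", "junit", "4.11"))
# http://repo1.maven.org/maven2/junit/junit/4.11/junit-4.11.jar
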
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>""" Plugin: Slideshow ***************** This plugin allows you to put a slideshow on a page, automatically displaying the selected image files with customizable transitions and intervals. Installation<|fim▁hole|>To use this plugin, put ``media_tree.contrib.cms_plugins.media_tree_slideshow`` in your installed apps, and run ``manage.py syncdb``. Template ======== Override the template ``cms/plugins/media_tree_slideshow.html`` if you want to customize the output. Please take a look at the default template for more information. By default, images are rendered to the output using the template ``media_tree/filenode/includes/figure.html``, which includes captions. .. Note:: The default template requires you to include `jQuery <http://jquery.com/>`_ in your pages, since it uses the `jQuery Cycle Plugin <http://jquery.malsup.com/cycle/>`_ (bundled) for image transitions. """<|fim▁end|>
============
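The docstring above asks for the plugin app to be added to the installed apps before running ``manage.py syncdb``. A hedged sketch of that settings fragment (only the documented app name comes from the row; the rest of the project layout is assumed):

# Hypothetical Django settings.py fragment.
INSTALLED_APPS = (
    # ... the project's existing apps, including media_tree itself ...
    'media_tree',
    'media_tree.contrib.cms_plugins.media_tree_slideshow',
)
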
<|file_name|>test_marshal.py<|end_file_name|><|fim▁begin|>from __future__ import print_function, absolute_import import random import unittest from pysmoke import marshal from pysmoke.smoke import ffi, Type, TypedValue, pystring, smokec, not_implemented, charp, dbg from pysmoke import QtCore, QtGui qtcore = QtCore.__binding__ qtgui = QtGui.__binding__ class MarshalTestCase(unittest.TestCase): def setUp(self):<|fim▁hole|> pass def test_qstring(self): qstr = marshal.QString.from_py('aqstring') print(qstr) pstr = marshal.QString.to_py(qstr) #dbg() self.assertEqual(pstr, 'aqstring') import gc; gc.collect() qstr2 = marshal.QString.from_py(pstr) print('QS:', qstr, pstr, qstr2, marshal.QString.to_py(qstr)) obj = QtGui.QObject() print('obj', obj.__cval__.value.s_voidp) obj.setObjectName('my_object') self.assertEqual(obj.objectName(), 'my_object') if __name__ == '__main__': unittest.main()<|fim▁end|>
pass def tearDown(self):
<|file_name|>SimpleGameLogic.cpp<|end_file_name|><|fim▁begin|>#include "SimpleGameLogic.h" #include "GameWorld.h" #include "MonstersPlace.h" void SimpleGameLogic::worldLoaded() { _physicsWorld = _world->getGameContent()->getPhysicsWorld(); _physicsWorld->setCollisionCallback(this); _tank = static_cast<Tank*>(_world->getGameContent()->getObjectByName("Player")); ControllerManager::getInstance()->registerListener(this); std::vector<MonstersPlace*> monstersPlaces = _world->getGameContent()->getObjectsByTypeName<MonstersPlace>(GameObjectType::MONSTERS_PLACE); for (auto monstersPlace : monstersPlaces) { MonstersPlaceHandler *handler = new MonstersPlaceHandler(_world, monstersPlace, _tank); _handlers.push_back(handler); } } void SimpleGameLogic::update(float delta) { _physicsWorld->update(delta); for (auto handler : _handlers) { handler->update(delta); } } void SimpleGameLogic::onKeyDown(EventKeyboard::KeyCode keyCode) { if (keyCode == EventKeyboard::KeyCode::KEY_LEFT_ARROW) { _tank->moveLeft(); } else if (keyCode == EventKeyboard::KeyCode::KEY_RIGHT_ARROW) { _tank->moveRight(); } else if (keyCode == EventKeyboard::KeyCode::KEY_UP_ARROW) { _tank->moveForward(); } else if (keyCode == EventKeyboard::KeyCode::KEY_DOWN_ARROW) { _tank->moveBackward(); } else if (keyCode == EventKeyboard::KeyCode::KEY_X) { _tank->fire(); } } void SimpleGameLogic::onKeyPress(EventKeyboard::KeyCode keyCode) { if (keyCode == EventKeyboard::KeyCode::KEY_Q) { _tank->prevWeapon(); } else if (keyCode == EventKeyboard::KeyCode::KEY_W) { _tank->nextWeapon(); } } void SimpleGameLogic::onKeyUp(EventKeyboard::KeyCode keyCode) { if (keyCode == EventKeyboard::KeyCode::KEY_LEFT_ARROW) { _tank->stopMoveLeft(); } else if (keyCode == EventKeyboard::KeyCode::KEY_RIGHT_ARROW) { _tank->stopMoveRight(); } else if (keyCode == EventKeyboard::KeyCode::KEY_UP_ARROW) { _tank->stopMoveBackward(); } else if (keyCode == EventKeyboard::KeyCode::KEY_DOWN_ARROW) { _tank->stopMoveBackward(); } } void SimpleGameLogic::onPointsBeginContact(SimplePhysicsPoint* pointA, SimplePhysicsPoint* pointB) { BaseGameObject *gameObjectA = static_cast<BaseGameObject*>(pointA->getUserData()); BaseGameObject *gameObjectB = static_cast<BaseGameObject*>(pointB->getUserData()); //ToDo êàê-òî íàäî îáîéòè ýòó ïðîâåðêó if (gameObjectA->getType() == GameObjectType::TANK && gameObjectB->getType() == GameObjectType::TANK_BULLET || gameObjectB->getType() == GameObjectType::TANK && gameObjectA->getType() == GameObjectType::TANK_BULLET) { return; } if (isMonster(gameObjectA) && isMonster(gameObjectB)) { return; } DamageableObject *damageableObjectA = dynamic_cast<DamageableObject*>(gameObjectA); DamageObject *damageObjectB = dynamic_cast<DamageObject*>(gameObjectB); if (damageableObjectA && damageObjectB) { DamageInfo *damageInfo = damageObjectB->getDamageInfo(); damageableObjectA->damage(damageInfo); damageObjectB->onAfterDamage(damageableObjectA); delete damageInfo; } DamageableObject *damageableObjectB = dynamic_cast<DamageableObject*>(gameObjectB); DamageObject *damageObjectA = dynamic_cast<DamageObject*>(gameObjectA); if (damageableObjectB && damageObjectA) { DamageInfo *damageInfo = damageObjectA->getDamageInfo(); damageableObjectB->damage(damageInfo);<|fim▁hole|> damageObjectA->onAfterDamage(damageableObjectB); delete damageInfo; } } void SimpleGameLogic::onPointReachedBorder(SimplePhysicsPoint* point) { BaseGameObject *gameObject = static_cast<BaseGameObject*>(point->getUserData()); if (gameObject) { if (gameObject->getType() == GameObjectType::TANK_BULLET) { 
scheduleOnce([=](float dt){ gameObject->detachFromWorld(); delete gameObject; }, 0.0f, "DestroyGameObject"); } } } bool SimpleGameLogic::isMonster(BaseGameObject *gameObject) { return gameObject->getType() == GameObjectType::MONSTER1 || gameObject->getType() == GameObjectType::MONSTER2 || gameObject->getType() == GameObjectType::MONSTER3; }<|fim▁end|>
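The collision callback in the C++ row above applies damage in both directions by probing each object's capabilities. A compact, language-neutral sketch of that dispatch (Python is used only for illustration; the method names are stand-ins for the DamageObject/DamageableObject interfaces):

def try_damage(source, target):
    # source must expose get_damage_info()/on_after_damage(), target must expose damage().
    if hasattr(source, "get_damage_info") and hasattr(target, "damage"):
        info = source.get_damage_info()
        target.damage(info)
        source.on_after_damage(target)

def on_points_begin_contact(obj_a, obj_b):
    # Mirror of the symmetric checks in onPointsBeginContact(): both orderings are tried.
    try_damage(obj_b, obj_a)
    try_damage(obj_a, obj_b)
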
<|file_name|>unboxed-closures-direct-sugary-call.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT.<|fim▁hole|>// option. This file may not be copied, modified, or distributed // except according to those terms. // pretty-expanded FIXME #23616 #![feature(unboxed_closures)] fn main() { let mut unboxed = || {}; unboxed(); }<|fim▁end|>
// // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
<|file_name|>eventon_gen_maps.js<|end_file_name|><|fim▁begin|>/* EventON Generate Google maps function */ (function($){ $.fn.evoGenmaps = function(opt){ var defaults = { delay: 0, fnt: 1, cal: '', mapSpotId: '', _action:'' }; var options = $.extend({}, defaults, opt); var geocoder; // popup lightbox generation if(options._action=='lightbox'){ var cur_window_top = parseInt($(window).scrollTop()) + 50; $('.evo_popin').css({'margin-top':cur_window_top}); $('.evo_pop_body').html(''); var event_list = this.closest('.eventon_events_list'); var content = this.siblings('.event_description').html(); var content_front = this.html(); var _content = $(content).not('.evcal_close'); // RTL if(event_list.hasClass('evortl')){ $('.evo_popin').addClass('evortl'); } $('.evo_pop_body').append('<div class="evopop_top">'+content_front+'</div>').append(_content); var this_map = $('.evo_pop_body').find('.evcal_gmaps'); var idd = this_map.attr('id'); this_map.attr({'id':idd+'_evop'}); $('.evo_popup').fadeIn(300); $('.evo_popbg').fadeIn(300); // check if gmaps should run if( this.attr('data-gmtrig')=='1' && this.attr('data-gmap_status')!='null'){ var cal = this.closest('div.ajde_evcal_calendar '); loadl_gmaps_in(this, cal, idd+'_evop'); } <|fim▁hole|> // functions if(options.fnt==1){ this.each(function(){ var eventcard = $(this).attr('eventcard'); if(eventcard=='1'){ $(this).find('a.desc_trig').each(function(elm){ //$(this).siblings('.event_description').slideDown(); var obj = $(this); if(options.delay==0){ load_googlemaps_here(obj); }else{ setTimeout(load_googlemaps_here, options.delay, obj); } }); } }); } if(options.fnt==2){ if(options.delay==0){ load_googlemaps_here(this); }else{ setTimeout(load_googlemaps_here, options.delay, this); } } if(options.fnt==3){ loadl_gmaps_in(this, options.cal, ''); } // gmaps on popup if(options.fnt==4){ // check if gmaps should run if( this.attr('data-gmtrig')=='1' && this.attr('data-gmap_status')!='null'){ var cal = this.closest('div.ajde_evcal_calendar '); loadl_gmaps_in(this, cal, options.mapSpotId); } } // function to load google maps for eventcard function load_googlemaps_here(obj){ if( obj.data('gmstat')!= '1'){ obj.attr({'data-gmstat':'1'}); } var cal = obj.closest('div.ajde_evcal_calendar '); if( obj.attr('data-gmtrig')=='1' && obj.attr('data-gmap_status')!='null'){ loadl_gmaps_in(obj, cal, ''); } } // Load the google map on the object function loadl_gmaps_in(obj, cal, mapId){ var evodata = cal.find('.evo-data'); var mapformat = evodata.data('mapformat'); var ev_location = obj.find('.evcal_desc'); var location_type = ev_location.attr('data-location_type'); if(location_type=='address'){ var address = ev_location.attr('data-location_address'); var location_type = 'add'; }else{ var address = ev_location.attr('data-latlng'); var location_type = 'latlng'; } var map_canvas_id= (mapId!=='')? mapId: obj.siblings('.event_description').find('.evcal_gmaps').attr('id'); // google maps styles // @since 2.2.22 var styles = ''; if(gmapstyles != 'default'){ styles = $.parseJSON(gmapstyles); } var zoom = evodata.data('mapzoom'); var zoomlevel = (typeof zoom !== 'undefined' && zoom !== false)? parseInt(zoom):12; var scroll = evodata.data('mapscroll'); //console.log(map_canvas_id+' '+mapformat+' '+ location_type +' '+scroll +' '+ address); //obj.siblings('.event_description').find('.evcal_gmaps').html(address); initialize(map_canvas_id, address, mapformat, zoomlevel, location_type, scroll, styles); } //console.log(options); }; }(jQuery));<|fim▁end|>
}
<|file_name|>dromaeo.py<|end_file_name|><|fim▁begin|><|fim▁hole|> from benchmarks import press from telemetry import benchmark from page_sets import dromaeo_pages @benchmark.Info(component='Blink>Bindings', emails=['[email protected]', '[email protected]', '[email protected]']) # pylint: disable=protected-access class DromaeoBenchmark(press._PressBenchmark): @classmethod def Name(cls): return 'dromaeo' def CreateStorySet(self, options): return dromaeo_pages.DromaeoStorySet()<|fim▁end|>
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file.
<|file_name|>ColorPanel.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # Note: this module is not a demo per se, but is used by many of # the demo modules for various purposes. import wx #--------------------------------------------------------------------------- class ColoredPanel(wx.Window): def __init__(self, parent, color): wx.Window.__init__(self, parent, -1, style = wx.SIMPLE_BORDER) self.SetBackgroundColour(color)<|fim▁hole|> if wx.Platform == '__WXGTK__': self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) #---------------------------------------------------------------------------<|fim▁end|>
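A small usage sketch for the panel above (assumes wxPython is installed; the import path is a guess at the project layout):

import wx
from ColorPanel import ColoredPanel  # hypothetical module path; adjust to the project layout

app = wx.App(False)
frame = wx.Frame(None, title="ColoredPanel demo", size=(300, 200))
panel = ColoredPanel(frame, "sky blue")  # accepts a wx colour name or a wx.Colour
frame.Show()
app.MainLoop()
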
<|file_name|>branchrevision.py<|end_file_name|><|fim▁begin|># Copyright 2009-2010 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). __metaclass__ = type __all__ = [ 'BranchRevision', ] from storm.locals import ( Int, Reference, Storm, ) from zope.interface import implements from lp.code.interfaces.branchrevision import IBranchRevision class BranchRevision(Storm): """See `IBranchRevision`.""" __storm_table__ = 'BranchRevision' __storm_primary__ = ("branch_id", "revision_id")<|fim▁hole|> branch = Reference(branch_id, 'Branch.id') revision_id = Int(name='revision', allow_none=False) revision = Reference(revision_id, 'Revision.id') sequence = Int(name='sequence', allow_none=True) def __init__(self, branch, revision, sequence=None): self.branch = branch self.revision = revision self.sequence = sequence<|fim▁end|>
implements(IBranchRevision) branch_id = Int(name='branch', allow_none=False)
<|file_name|>mode_tag.py<|end_file_name|><|fim▁begin|>CAMERA_MODE_PHOTO2 = 100 CAMERA_MODE_PHOTO = 0 CAMERA_MODE_FACE_BEAUTY = 1 CAMERA_MODE_PANORAMA = 2 CAMERA_MODE_SELF_WIDEVIEW = 3 CAMERA_MODE_SCENE_FRAME = 4 CAMERA_MODE_GESTURE_SHOT = 5 CAMERA_MODE_LIVE_PHOTO = 6 CAMERA_MODE_VIDEO = 7 CAMERA_MODE_PROFESSIONAL = 8 CAMERA_MODE_NIGHTSHOT = 9 CAMERA_MODE_PIP = 10 CAMERA_MODE_SPORTS = 11 CAMERA_MODE_VIV = 12 CAMERA_MODE_ZBAR_CODE = 13 CAMERA_MODE_REFOCUS = 14<|fim▁hole|>CAMERA_MODE_SUPERPIXEL = 16 CAMERA_MODE_CLEARSIGHT = 17 CAMERA_MODE_VIDEOBEAUTY = 18 CAMERA_MODE_VIDEOTIMELAPSE = 19 CAMERA_MODE_MONOCHROME = 20 CAMERA_MODE_PORTRAIT = 21 VALUE_CAPTURE_MODE_VIDEO = "video" VALUE_CAPTURE_MODE_AUTO = "normal" VALUE_CAPTURE_MODE_BEAUTYSHOT = "beautyshot" VALUE_CAPTURE_MODE_NIGHTSHOT = "nightshot" VALUE_CAPTURE_MODE_PANORAMA = "panorama" VALUE_CAPTURE_MODE_WIDESELF = "wideself" VALUE_CAPTURE_MODE_PROFESSIONAL = "professional" VALUE_CAPTURE_MODE_SCENE_FRAME = "sceneframe" VALUE_CAPTURE_MODE_SPORT = "sports" VALUE_CAPTURE_MODE_PIP = "pip" VALUE_CAPTURE_MODE_VIV = "viv" VALUE_CAPTURE_MODE_ZBAR = "zbarcode" VALUE_CAPTURE_MODE_REFOCUS = "refocus" VALUE_CAPTURE_MODE_CHROMAFLASH = "chromaflash" VALUE_CAPTURE_MODE_SUPERPIXEL = "superphoto" VALUE_CAPTURE_MODE_VEDOBEAUTY = "videobeauty" VALUE_CAPTURE_MODE_CLEARSIGHT = "clearsight" VALUE_CAPTURE_MODE_VEDOTIMELAPSE = "videotimelapse" VALUE_CAPTURE_MODE_MONOCHROME = "monochrome" VALUE_CAPTURE_MODE_PORTRAIT = "picselfie" VALUE_CAPTURE_MODE_VIDEOAUTOZOOM = "videoautozoom" VALUE_CAPTURE_MODE_UNKNOWN = "unknown" def get_mode_name(mode): return { CAMERA_MODE_PHOTO: VALUE_CAPTURE_MODE_AUTO, CAMERA_MODE_FACE_BEAUTY: VALUE_CAPTURE_MODE_BEAUTYSHOT, CAMERA_MODE_PANORAMA: VALUE_CAPTURE_MODE_PANORAMA, CAMERA_MODE_SELF_WIDEVIEW: VALUE_CAPTURE_MODE_WIDESELF, CAMERA_MODE_SCENE_FRAME: VALUE_CAPTURE_MODE_SCENE_FRAME, CAMERA_MODE_GESTURE_SHOT: VALUE_CAPTURE_MODE_UNKNOWN, CAMERA_MODE_LIVE_PHOTO: VALUE_CAPTURE_MODE_UNKNOWN, CAMERA_MODE_VIDEO: VALUE_CAPTURE_MODE_VIDEO, CAMERA_MODE_PROFESSIONAL: VALUE_CAPTURE_MODE_PROFESSIONAL, CAMERA_MODE_NIGHTSHOT: VALUE_CAPTURE_MODE_NIGHTSHOT, CAMERA_MODE_PIP: VALUE_CAPTURE_MODE_PIP, CAMERA_MODE_SPORTS: VALUE_CAPTURE_MODE_SPORT, CAMERA_MODE_VIV: VALUE_CAPTURE_MODE_VIV, CAMERA_MODE_ZBAR_CODE: VALUE_CAPTURE_MODE_ZBAR, CAMERA_MODE_REFOCUS: VALUE_CAPTURE_MODE_REFOCUS, CAMERA_MODE_CHROMAFLASH: VALUE_CAPTURE_MODE_CHROMAFLASH, CAMERA_MODE_SUPERPIXEL: VALUE_CAPTURE_MODE_SUPERPIXEL, CAMERA_MODE_CLEARSIGHT: VALUE_CAPTURE_MODE_CLEARSIGHT, CAMERA_MODE_VIDEOBEAUTY: VALUE_CAPTURE_MODE_VEDOBEAUTY, CAMERA_MODE_VIDEOTIMELAPSE: VALUE_CAPTURE_MODE_VEDOTIMELAPSE, CAMERA_MODE_MONOCHROME: VALUE_CAPTURE_MODE_MONOCHROME, CAMERA_MODE_PORTRAIT: VALUE_CAPTURE_MODE_PORTRAIT }.get(mode) action = "android.myos.action.%s" ACTION_NOMARL_CAMERA = "NOMARLCAMERA" # normal ACTION_NS_CAMERA = "NSCAMERA" # night ACTION_BEATY_CAMERA = "BEATYCAMERA" # beauty ACTION_SUPERPIXEL_CAMERA = "SUPERPIXELCAMERA" # super photo # ACTION_PRO_CAMERA = "PROCAMERA" # ACTION_WIDESELFT_CAMERA = "WIDESELFCAMERA" # ACTION_SPORT_CAMERA = "SPORTCAMERA" # ACTION_SMARTFOCUS_CAMERA = "SMARTFOCUSCAMERA" # ACTION_SMARTFLASH_CAMERA = "SMARTFLASHCAMERA" # ACTION_PANORAMA_CAMERA = "PANORAMACAMERA" # ACTION_MONOCHROME_CAMERA = "MONOCHROMECAMERA" def get_actions(): actions = [action % ACTION_NS_CAMERA, action % ACTION_BEATY_CAMERA, action % ACTION_NOMARL_CAMERA, action % ACTION_SUPERPIXEL_CAMERA] return actions<|fim▁end|>
CAMERA_MODE_CHROMAFLASH = 15
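Quick usage sketch for the helpers above (the module path is assumed from the file name; the expected strings come straight from the mapping in the file):

from mode_tag import get_mode_name, get_actions, CAMERA_MODE_PANORAMA, CAMERA_MODE_MONOCHROME

assert get_mode_name(CAMERA_MODE_PANORAMA) == "panorama"
assert get_mode_name(CAMERA_MODE_MONOCHROME) == "monochrome"
print(get_actions())
# ['android.myos.action.NSCAMERA', 'android.myos.action.BEATYCAMERA',
#  'android.myos.action.NOMARLCAMERA', 'android.myos.action.SUPERPIXELCAMERA']
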
<|file_name|>button-bar.tsx<|end_file_name|><|fim▁begin|>import * as React from 'react' import Box from './box' import {globalStyles, isMobile, collapseStyles, isTablet} from '../styles' type Props = { direction: 'row' | 'column' align?: 'flex-start' | 'flex-end' | 'center' // ignored by column,,, children: React.ReactNode fullWidth?: boolean small?: boolean // ignored by column,,, style?: any } class ButtonBar extends React.PureComponent<Props> { static defaultProps = { align: 'center', direction: 'row', fullWidth: false, small: false, } _spacing = () => { if (this.props.direction === 'row' && this.props.small && !isMobile) { return SmallSpacer } return BigSpacer } _surroundSpacing = () => { return this.props.direction === 'column' } render() { const Spacing = this._spacing() const surroundSpacing = this._surroundSpacing() const children = React.Children.toArray(this.props.children) const childrenWithSpacing = children.reduce<Array<React.ReactNode>>((arr, c, idx) => { if (surroundSpacing || idx > 0) { arr.push(<Spacing key={arr.length} />) } arr.push(c) if (surroundSpacing && idx === children.length - 1) { arr.push(<Spacing key={arr.length} />) } return arr }, []) const minHeight = { minHeight: isMobile ? (this.props.small ? 64 : 72) : this.props.small ? 44 : 64, } const style = collapseStyles([ { alignItems: this.props.fullWidth ? 'stretch' : 'center', width: '100%',<|fim▁hole|> : { ...globalStyles.flexBoxRow, justifyContent: this.props.align, ...minHeight, }), }, this.props.style, ]) return <Box style={style}>{childrenWithSpacing}</Box> } } // Note explicitly not using globalMargins here. We don't necessarily want this spacing to change ever const BigSpacer = () => <Box style={bigSpacerStyle} /> const bigSpacerStyle = { flexShrink: 0, height: 8, width: 8, } const SmallSpacer = () => <Box style={smallSpacerStyle} /> const smallSpacerStyle = { flexShrink: 0, height: isMobile ? 8 : 4, width: isMobile ? 8 : 4, } export default ButtonBar<|fim▁end|>
...(isTablet ? {maxWidth: 460} : {}), ...(this.props.direction === 'column' ? {...globalStyles.flexBoxColumn}
<|file_name|>graphqlOptions.d.ts<|end_file_name|><|fim▁begin|>import { GraphQLSchema, ValidationContext, GraphQLFieldResolver } from 'graphql'; import { LogFunction } from './runQuery'; import { CacheControlExtensionOptions } from 'apollo-cache-control'; export interface GraphQLServerOptions<TContext = any> { schema: GraphQLSchema; formatError?: Function; rootValue?: any; context?: TContext; logFunction?: LogFunction; formatParams?: Function; validationRules?: Array<(context: ValidationContext) => any>; formatResponse?: Function; fieldResolver?: GraphQLFieldResolver<any, TContext>; debug?: boolean; tracing?: boolean; cacheControl?: boolean | CacheControlExtensionOptions;<|fim▁hole|>export declare function resolveGraphqlOptions(options: GraphQLServerOptions | Function, ...args: any[]): Promise<GraphQLServerOptions>;<|fim▁end|>
} export default GraphQLServerOptions;
<|file_name|>classify.py<|end_file_name|><|fim▁begin|># Load pickled data import pickle import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import ImageGrid import numpy as np import tensorflow as tf from tensorflow.contrib.layers import flatten from sklearn.utils import shuffle class Data: def __init__(self): training_file = 'data/train.p' validation_file= 'data/valid.p' testing_file = 'data/test.p' with open(training_file, mode='rb') as f: train = pickle.load(f) with open(validation_file, mode='rb') as f: valid = pickle.load(f) with open(testing_file, mode='rb') as f: test = pickle.load(f) self.X_train, self.y_train = train['features'], train['labels'] self.X_valid, self.y_valid = valid['features'], valid['labels'] self.X_test, self.y_test = test['features'], test['labels'] def render_data(self): image_with_label = zip(self.X_train, self.y_train) seen_labels = set() fig = plt.figure(figsize=(200, 200)) total_unique_labels = len(set(self.y_train)) unique_rows = total_unique_labels // 5 + 1 grid = ImageGrid(fig, 151, # similar to subplot(141) nrows_ncols=(unique_rows, 5), axes_pad=0.05, label_mode="1", ) i = 0 for i_l in image_with_label: img, label = i_l if label not in seen_labels: im = grid[i].imshow(img) seen_labels.add(label) i += 1 plt.show() def LeNet(x, max_labels): # Hyper parameters mu = 0 sigma = 0.1 # Convolutional Layer 1: Input = 32x32x3 Output = 28x28x6 conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 3, 6), mean=mu, stddev=sigma), name="v1") conv1_b = tf.Variable(tf.zeros(6), name="v2") conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b # Activation Layer conv1 = tf.nn.relu(conv1) # Max Pooling : Input = 28x28x6 Output = 14x14x6 conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') <|fim▁hole|> conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b # Activation Layer conv2 = tf.nn.relu(conv2) # Max Pooling : Input = 10x10x16 Output = 5x5x16 conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') # Fully Connected Layer fc0 = flatten(conv2) # Layer 3 - Fully Connected: Input = 400 Output = 120 fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean=mu, stddev=sigma), name="v5") fc1_b = tf.Variable(tf.zeros(120), name="v6") fc1 = tf.matmul(fc0, fc1_W) + fc1_b # Activation fc1 = tf.nn.relu(fc1) # Layer 4 : Fully Connected: Input = 120 Output = 84 fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean=mu, stddev=sigma), name="v7") fc2_b = tf.Variable(tf.zeros(84), name="v8") fc2 = tf.matmul(fc1, fc2_W) + fc2_b # Activation fc2 = tf.nn.relu(fc2) # Layer 5 - Fully Connected Input = 84 Output = 10 fc3_W = tf.Variable(tf.truncated_normal(shape=(84, max_labels), mean=mu, stddev=sigma), name="v9") fc3_b = tf.Variable(tf.zeros(max_labels), name="v10") logits = tf.matmul(fc2, fc3_W) + fc3_b return logits def train(max_classified_id): x = tf.placeholder(tf.float32, (None, 32, 32, 3), name="X") y = tf.placeholder(tf.int32, (None), name="Y") one_hot_y = tf.one_hot(y, max_classified_id) rate = 0.001 logits = LeNet(x, max_classified_id) cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_y) loss_operation = tf.reduce_mean(cross_entropy) optimizer = tf.train.AdamOptimizer(learning_rate=rate) training_operation = optimizer.minimize(loss_operation) correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1)) accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) 
saver = tf.train.Saver(), training_operation, accuracy_operation return saver, training_operation, accuracy_operation, x, y def evaluate(x, y, X_data, y_data, accuracy_operation, BATCH_SIZE): num_examples = len(X_data) total_accuracy = 0 sess = tf.get_default_session() for offset in range(0, num_examples, BATCH_SIZE): batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE] accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y}) total_accuracy += (accuracy * len(batch_x)) return total_accuracy / num_examples def main(): data = Data() EPOCHS = 10 BATCH_SIZE = 128 max_classified_id = np.max(data.y_train) saver, training_operation, accuracy_operation, x, y = train(max_classified_id) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) num_examples = len(data.X_train) print("Training...") print() for i in range(EPOCHS): X_train, y_train = shuffle(data.X_train, data.y_train) for offset in range(0, num_examples, BATCH_SIZE): end = offset + BATCH_SIZE batch_x, batch_y = X_train[offset:end], y_train[offset:end] sess.run(training_operation, feed_dict={x: batch_x, y: batch_y}) validation_accuracy = evaluate(x, y, data.X_valid, data.y_valid, accuracy_operation, BATCH_SIZE) print("EPOCH {} ...".format(i+1)) print("Validation Accuracy = {:.3f}".format(validation_accuracy)) print() saver.save(sess, './lenet') print("Model saved") if __name__ == "__main__": main()<|fim▁end|>
# Convolutional Layer 2: Input = 14x14x6 Output: 10x10x16 conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean=mu, stddev=sigma), name="v3") conv2_b = tf.Variable(tf.zeros(16), name="v4")
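The shape comments in the network above can be double-checked with a little arithmetic: each 'VALID' 5x5 convolution shrinks a side by 4 and each 2x2 pooling halves it, which is where the 400-wide flatten comes from. A quick sanity check:

def conv_valid(side, kernel=5):
    return side - kernel + 1   # no padding, stride 1

def pool2(side):
    return side // 2           # 2x2 max pool, stride 2

side = conv_valid(32)          # conv1: 32 -> 28
side = pool2(side)             # pool1: 28 -> 14
side = conv_valid(side)        # conv2: 14 -> 10
side = pool2(side)             # pool2: 10 -> 5
print(side, side * side * 16)  # 5 400  (matches the 400x120 fc1 weights)
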
<|file_name|>parity.cc<|end_file_name|><|fim▁begin|>// -*- coding: utf-8 -*- // Copyright (C) 2016 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // // Spot is free software; you can redistribute it and/or modify it // under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 3 of the License, or // (at your option) any later version. // // Spot is distributed in the hope that it will be useful, but WITHOUT // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY // or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public // License for more details. // // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. #include <cassert> #include <ctime> #include <vector> #include <spot/twaalgos/dualize.hh> #include <spot/twaalgos/hoa.hh> #include <spot/twaalgos/iscolored.hh> #include <spot/twaalgos/parity.hh> #include <spot/twaalgos/product.hh> #include <spot/twaalgos/randomgraph.hh> #include <spot/misc/random.hh> #include <spot/twaalgos/complete.hh> #include <spot/twa/twagraph.hh> #include <spot/twa/fwd.hh> #include <spot/twa/acc.hh> #include <spot/misc/trival.hh> #include <utility> #include <string> #include <iostream> #define LAST_AUT result.back().first #define LAST_NUM_SETS result.back().second #define NEW_AUT() do { \ result.emplace_back(spot::random_graph(6, 0.5, &apf, \ current_bdd, 0, 0, 0.5, true), 0); \ LAST_NUM_SETS = 0; \ /* print_hoa need this */ \ LAST_AUT->prop_state_acc(spot::trival::maybe()); \ } while (false) #define SET_TR(t, value) do { \ unsigned value_tmp = value; \ if (value_tmp + 1 > LAST_NUM_SETS) \ LAST_NUM_SETS = value_tmp + 1; \ t.acc.set(value_tmp); \ } while (false) static std::vector<std::pair<spot::twa_graph_ptr, unsigned>> generate_aut(const spot::bdd_dict_ptr& current_bdd) { spot::atomic_prop_set apf = spot::create_atomic_prop_set(3); std::vector<std::pair<spot::twa_graph_ptr, unsigned>> result; // No accset on any transition NEW_AUT(); // The same accset on every transitions NEW_AUT(); for (auto& t: LAST_AUT->edges()) SET_TR(t, 0); // All used / First unused / Last unused / First and last unused for (auto incr_ext: { 0, 1 }) for (auto used: { 1, 2 }) for (auto modulo: { 4, 5, 6 }) if (incr_ext + modulo <= 6) { NEW_AUT(); unsigned count = 0; for (auto& t: LAST_AUT->edges()) if (std::rand() % used == 0) { auto value = ++count % modulo + incr_ext; SET_TR(t, value); } } // One-Three in middle not used for (auto i: { 0, 1 }) for (auto start: { 1, 2 }) for (auto unused: { 1, 2, 3 }) { NEW_AUT(); auto count = 0; for (auto& t: LAST_AUT->edges()) { int val = 0; if (count % (3 + i) < start) val = count % (3 + i); else val = count % (3 + i) + unused; SET_TR(t, val); } } // All accset on all transitions for (auto i: { 0, 1 }) { NEW_AUT(); for (auto& t: LAST_AUT->edges()) for (auto acc = 0; acc < 5 + i; ++acc) SET_TR(t, acc); } // Some random automata std::vector<std::vector<int>> cont_sets; for (auto i = 0; i <= 6; ++i) { std::vector<int> cont_set; for (auto j = 0; j < i; ++j) cont_set.push_back(j); cont_sets.push_back(cont_set); } for (auto min: { 0, 1 }) { for (auto num_sets: { 1, 2, 5, 6 }) for (auto i = 0; i < 10; ++i) { NEW_AUT(); for (auto& t: LAST_AUT->edges()) { auto nb_acc = std::rand() % (num_sets - min + 1) + min; std::random_shuffle(cont_sets[num_sets].begin(), cont_sets[num_sets].end()); for (auto j = 0; j < nb_acc; ++j) SET_TR(t, 
cont_sets[num_sets][j]); } } for (auto num_sets: {2, 3}) for (auto even: {0, 1}) if ((num_sets - 1) * 2 + even < 6) { NEW_AUT(); for (auto& t: LAST_AUT->edges()) { auto nb_acc = std::rand() % (num_sets - min + 1) + min; std::random_shuffle(cont_sets[num_sets].begin(), cont_sets[num_sets].end()); for (auto j = 0; j < nb_acc; ++j) { auto value = cont_sets[num_sets][j] * 2 + even; SET_TR(t, value); } } } } return result; } static std::vector<std::tuple<spot::acc_cond::acc_code, bool, bool, unsigned>> generate_acc() { std::vector<std::tuple<spot::acc_cond::acc_code, bool, bool, unsigned>> result; for (auto max: { true, false }) for (auto odd: { true, false }) for (auto num_sets: { 0, 1, 2, 5, 6 }) result.emplace_back(spot::acc_cond::acc_code::parity(max, odd, num_sets), max, odd, num_sets); return result; } static bool is_included(spot::const_twa_graph_ptr left, spot::const_twa_graph_ptr right, bool first_left) { auto tmp = spot::dualize(right); auto product = spot::product(left, tmp); if (!product->is_empty()) { std::cerr << "======Not included======" << std::endl; if (first_left) std::cerr << "======First automaton======" << std::endl; else std::cerr << "======Second automaton======" << std::endl; spot::print_hoa(std::cerr, left); std::cerr << std::endl; if (first_left) std::cerr << "======Second automaton======" << std::endl; else std::cerr << "======First automaton======" << std::endl; spot::print_hoa(std::cerr, right); std::cerr << std::endl; if (first_left) std::cerr << "======!Second automaton======" << std::endl; else std::cerr << "======!First automaton======" << std::endl; spot::print_hoa(std::cerr, tmp); std::cerr << std::endl; if (first_left) std::cerr << "======First X !Second======" <<std::endl; else std::cerr << "======Second X !First======" <<std::endl; spot::print_hoa(std::cerr, product); std::cerr << std::endl; return false; } return true; } static bool are_equiv(spot::const_twa_graph_ptr left, spot::const_twa_graph_ptr right) { return is_included(left, right, true) && is_included(right, left, false); } static bool is_right_parity(spot::const_twa_graph_ptr aut, spot::parity_kind target_kind, spot::parity_style target_style, bool origin_max, bool origin_odd, unsigned num_sets) { bool is_max; bool is_odd; if (!aut->acc().is_parity(is_max, is_odd)) return false; bool target_max; bool target_odd; if (aut->num_sets() <= 1 || num_sets <= 1 || target_kind == spot::parity_kind_any) target_max = is_max; else if (target_kind == spot::parity_kind_max) target_max = true; else if (target_kind == spot::parity_kind_min) target_max = false; else target_max = origin_max; if (aut->num_sets() == 0 || num_sets == 0 || target_style == spot::parity_style_any) target_odd = is_odd; else if (target_style == spot::parity_style_odd) target_odd = true; else if (target_style == spot::parity_style_even) target_odd = false; else target_odd = origin_odd; if (!(is_max == target_max && is_odd == target_odd)) { std::cerr << "======Wrong accceptance======" << std::endl; std::string kind[] = { "max", "min", "same", "any" }; std::string style[] = { "odd", "even", "same", "any" }; std::cerr << "target: " << kind[target_kind] << ' ' << style[target_style] << std::endl; std::cerr << "origin: " << kind[origin_max ? 0 : 1] << ' ' << style[origin_odd ? 0 : 1] << ' ' << num_sets << std::endl; std::cerr << "actually: " << kind[is_max ? 0 : 1] << ' ' << style[is_odd ? 
0 : 1] << ' ' << aut->num_sets() << std::endl; std::cerr << std::endl; return false; } return true; } static bool is_almost_colored(spot::const_twa_graph_ptr aut) { for (auto t: aut->edges()) if (t.acc.count() > 1) { std::cerr << "======Not colored======" << std::endl; spot::print_hoa(std::cerr, aut); std::cerr << std::endl; return false; } return true; } static bool is_colored_printerr(spot::const_twa_graph_ptr aut) { bool result = is_colored(aut); if (!result) { std::cerr << "======Not colored======" << std::endl; spot::print_hoa(std::cerr, aut); std::cerr << std::endl; } return result; } static spot::parity_kind to_parity_kind(bool is_max) { if (is_max) return spot::parity_kind_max; return spot::parity_kind_min; } static spot::parity_style to_parity_style(bool is_odd) { if (is_odd) return spot::parity_style_odd; return spot::parity_style_even; } int main() { auto current_bdd = spot::make_bdd_dict(); spot::srand(0); auto parity_kinds = { spot::parity_kind_max, spot::parity_kind_min, spot::parity_kind_same, spot::parity_kind_any, }; auto parity_styles = { spot::parity_style_odd, spot::parity_style_even, spot::parity_style_same, spot::parity_style_any, }; auto acceptance_sets = generate_acc(); auto automata_tuples = generate_aut(current_bdd); unsigned num_automata = automata_tuples.size(); unsigned num_acceptance = acceptance_sets.size(); std::cerr << "num of automata: " << num_automata << '\n'; std::cerr << "num of acceptance expression: " << num_acceptance << '\n'; for (auto acc_tuple: acceptance_sets) for (auto& aut_tuple: automata_tuples) { auto& aut = aut_tuple.first; auto aut_num_sets = aut_tuple.second; auto acc = std::get<0>(acc_tuple); auto is_max = std::get<1>(acc_tuple); auto is_odd = std::get<2>(acc_tuple); auto acc_num_sets = std::get<3>(acc_tuple); if (aut_num_sets <= acc_num_sets) { aut->set_acceptance(acc_num_sets, acc); // Check change_parity for (auto kind: parity_kinds) for (auto style: parity_styles) { auto output = spot::change_parity(aut, kind, style); assert(is_right_parity(output, kind, style, is_max, is_odd, acc_num_sets) && "change_parity: wrong acceptance."); assert(are_equiv(aut, output) && "change_parity: not equivalent."); assert(is_almost_colored(output) && "change_parity: too many acc on a transition"); } // Check colorize_parity for (auto keep_style: { true, false }) { auto output = spot::colorize_parity(aut, keep_style); assert(is_colored_printerr(output) && "colorize_parity: not colored."); assert(are_equiv(aut, output) && "colorize_parity: not equivalent."); auto target_kind = to_parity_kind(is_max); auto target_style = keep_style ? to_parity_style(is_odd) : spot::parity_style_any; assert(is_right_parity(output, target_kind, target_style, is_max, is_odd, acc_num_sets) && "change_parity: wrong acceptance."); } // Check cleanup_parity for (auto keep_style: { true, false }) { auto output = spot::cleanup_parity(aut, keep_style); assert(is_almost_colored(output) && "cleanup_parity: too many acc on a transition."); assert(are_equiv(aut, output) && "cleanup_parity: not equivalent."); auto target_kind = to_parity_kind(is_max); auto target_style = keep_style ? 
to_parity_style(is_odd) : spot::parity_style_any; assert(is_right_parity(output, target_kind, target_style, is_max, is_odd, acc_num_sets) && "cleanup_parity: wrong acceptance."); } } } std::random_shuffle(automata_tuples.begin(), automata_tuples.end()); unsigned num_left = 15; unsigned num_right = 15; unsigned acc_index = 0; unsigned nb = 0; // Parity product and sum for (unsigned left_index = 0; left_index < num_left; ++left_index) { auto& aut_tuple_first = automata_tuples[left_index % num_automata]; auto& left = aut_tuple_first.first; auto aut_num_sets_first = aut_tuple_first.second; while (std::get<3>(acceptance_sets[acc_index]) < aut_num_sets_first) acc_index = (acc_index + 1) % num_acceptance; auto acc_tuple_first = acceptance_sets[acc_index]; acc_index = (acc_index + 1) % num_acceptance; auto acc_first = std::get<0>(acc_tuple_first); auto acc_num_sets_first = std::get<3>(acc_tuple_first); left->set_acceptance(acc_num_sets_first, acc_first); for (unsigned right_index = 0; right_index < num_right; ++right_index) { auto& aut_tuple_second = automata_tuples[(num_left + right_index) % num_automata]; auto& right = aut_tuple_second.first; auto aut_num_sets_second = aut_tuple_second.second; while (std::get<3>(acceptance_sets[acc_index]) < aut_num_sets_second) acc_index = (acc_index + 1) % num_acceptance; auto acc_tuple_second = acceptance_sets[acc_index]; acc_index = (acc_index + 1) % num_acceptance; auto acc_second = std::get<0>(acc_tuple_second); auto acc_num_sets_second = std::get<3>(acc_tuple_second); right->set_acceptance(acc_num_sets_second, acc_second); auto result_prod = spot::parity_product(left, right); auto ref_prod = spot::product(left, right); if (!are_equiv(result_prod, ref_prod)) { std::cerr << nb << ": parity_product: Not equivalent.\n" << "=====First Automaton=====\n"; spot::print_hoa(std::cerr, left); std::cerr << "=====Second Automaton=====\n"; spot::print_hoa(std::cerr, right); assert(false && "parity_product: Not equivalent.\n"); } assert(is_colored_printerr(result_prod) && "parity_product: not colored."); assert(is_right_parity(result_prod, spot::parity_kind_any, spot::parity_style_any, true, true, 2) && "parity_product: not a parity acceptance condition"); auto result_sum = spot::parity_product_or(left, right); auto ref_sum = spot::product_or(left, right); if (!are_equiv(result_sum, ref_sum)) { std::cerr << nb << ": parity_product_or: Not equivalent.\n" << "=====First Automaton=====\n"; spot::print_hoa(std::cerr, left); std::cerr << "=====Second Automaton=====\n"; spot::print_hoa(std::cerr, right); assert(false && "parity_product_or: Not equivalent.\n"); } assert(is_colored_printerr(result_sum) && "parity_product_or: not colored."); assert(is_right_parity(result_sum, spot::parity_kind_any, spot::parity_style_any, true, true, 2)<|fim▁hole|> && "parity_product_or: not a parity acceptance condition"); ++nb; } } return 0; }<|fim▁end|>
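For readers unfamiliar with the acceptance conditions exercised above: under a "max odd" parity condition a run is accepting when the highest colour it visits infinitely often is odd, and "min"/"even" flip the corresponding choice. A toy illustration over a finite colour set (Python, purely for intuition; the real check is over infinite runs of the automaton):

def parity_accepts(inf_colours, is_max=True, is_odd=True):
    pivot = max(inf_colours) if is_max else min(inf_colours)
    return (pivot % 2 == 1) == is_odd

print(parity_accepts({0, 2, 3}))                # max colour 3 is odd  -> True
print(parity_accepts({0, 2, 3}, is_max=False))  # min colour 0 is even -> False
print(parity_accepts({0, 2, 4}, is_odd=False))  # max colour 4 is even -> True
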
<|file_name|>index.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
module.exports = require('./lib/dustjs-browserify');
<|file_name|>vfs.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # This file is part of beets. # Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. """A simple utility for constructing filesystem-like trees from beets libraries. """ from __future__ import division, absolute_import, print_function from collections import namedtuple from beets import util Node = namedtuple('Node', ['files', 'dirs']) <|fim▁hole|>def _insert(node, path, itemid): """Insert an item into a virtual filesystem node.""" if len(path) == 1: # Last component. Insert file. node.files[path[0]] = itemid else: # In a directory. dirname = path[0] rest = path[1:] if dirname not in node.dirs: node.dirs[dirname] = Node({}, {}) _insert(node.dirs[dirname], rest, itemid) def libtree(lib): """Generates a filesystem-like directory tree for the files contained in `lib`. Filesystem nodes are (files, dirs) named tuples in which both components are dictionaries. The first maps filenames to Item ids. The second maps directory names to child node tuples. """ root = Node({}, {}) for item in lib.items(): dest = item.destination(fragment=True) parts = util.components(dest) _insert(root, parts, item.id) return root<|fim▁end|>
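A standalone sketch of the same tree construction, with the beets-specific pieces stripped out (same Node shape, same recursive insert):

from collections import namedtuple

Node = namedtuple('Node', ['files', 'dirs'])

def insert(node, parts, item_id):
    if len(parts) == 1:
        node.files[parts[0]] = item_id
    else:
        child = node.dirs.setdefault(parts[0], Node({}, {}))
        insert(child, parts[1:], item_id)

root = Node({}, {})
insert(root, ['Artist', 'Album', '01 Song.mp3'], 1)
insert(root, ['Artist', 'Album', '02 Song.mp3'], 2)
print(root.dirs['Artist'].dirs['Album'].files)
# {'01 Song.mp3': 1, '02 Song.mp3': 2}
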
<|file_name|>ldparser.cpp<|end_file_name|><|fim▁begin|>/**************************************************************************** ** ** Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies). ** Contact: http://www.qt-project.org/legal ** ** This file is part of Qt Creator. ** ** Commercial License Usage ** Licensees holding valid commercial Qt licenses may use this file in ** accordance with the commercial license agreement provided with the ** Software or, alternatively, in accordance with the terms contained in ** a written agreement between you and Digia. For licensing terms and ** conditions see http://qt.digia.com/licensing. For further information ** use the contact form at http://qt.digia.com/contact-us. ** ** GNU Lesser General Public License Usage ** Alternatively, this file may be used under the terms of the GNU Lesser ** General Public License version 2.1 as published by the Free Software ** Foundation and appearing in the file LICENSE.LGPL included in the ** packaging of this file. Please review the following information to ** ensure the GNU Lesser General Public License version 2.1 requirements ** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html. ** ** In addition, as a special exception, Digia gives you certain additional ** rights. These rights are described in the Digia Qt LGPL Exception ** version 1.1, included in the file LGPL_EXCEPTION.txt in this package. ** ****************************************************************************/ #include "ldparser.h" #include "projectexplorerconstants.h" #include "task.h" using namespace ProjectExplorer; namespace { // opt. drive letter + filename: (2 brackets) const char * const FILE_PATTERN = "(([A-Za-z]:)?[^:]+\\.[^:]+):"; // line no. or elf segment + offset (1 bracket) // const char * const POSITION_PATTERN = "(\\d+|\\(\\.[^:]+[+-]0x[a-fA-F0-9]+\\):)"; const char * const POSITION_PATTERN = "(\\d|\\(\\..+[+-]0x[a-fA-F0-9]+\\):)"; const char * const COMMAND_PATTERN = "^(.*[\\\\/])?([a-z0-9]+-[a-z0-9]+-[a-z0-9]+-)?(ld|gold)(-[0-9\\.]+)?(\\.exe)?: "; } LdParser::LdParser() { setObjectName(QLatin1String("LdParser")); m_regExpLinker.setPattern(QLatin1Char('^') + QString::fromLatin1(FILE_PATTERN) + QLatin1Char('(') + QString::fromLatin1(FILE_PATTERN) + QLatin1String(")?(") + QLatin1String(POSITION_PATTERN) + QLatin1String(")?\\s(.+)$")); m_regExpLinker.setMinimal(true); m_regExpGccNames.setPattern(QLatin1String(COMMAND_PATTERN)); m_regExpGccNames.setMinimal(true); } void LdParser::stdError(const QString &line) { QString lne = rightTrimmed(line); if (lne.startsWith(QLatin1String("TeamBuilder ")) || lne.startsWith(QLatin1String("distcc[")) || lne.contains(QLatin1String("ar: creating "))) { IOutputParser::stdError(line); return; } if (lne.startsWith(QLatin1String("collect2:"))) { emit addTask(Task(Task::Error, lne /* description */, Utils::FileName() /* filename */, -1 /* linenumber */, Core::Id(Constants::TASK_CATEGORY_COMPILE))); return; } else if (m_regExpGccNames.indexIn(lne) > -1) { QString description = lne.mid(m_regExpGccNames.matchedLength());<|fim▁hole|> Task task(Task::Error, description, Utils::FileName(), /* filename */ -1, /* line */ Core::Id(Constants::TASK_CATEGORY_COMPILE)); if (description.startsWith(QLatin1String("warning: "))) { task.type = Task::Warning; task.description = description.mid(9); } else if (description.startsWith(QLatin1String("fatal: "))) { task.description = description.mid(7); } emit addTask(task); return; } else if (m_regExpLinker.indexIn(lne) > -1) { bool ok; int lineno = 
m_regExpLinker.cap(7).toInt(&ok); if (!ok) lineno = -1; Utils::FileName filename = Utils::FileName::fromUserInput(m_regExpLinker.cap(1)); if (!m_regExpLinker.cap(4).isEmpty() && !m_regExpLinker.cap(4).startsWith(QLatin1String("(.text"))) filename = Utils::FileName::fromUserInput(m_regExpLinker.cap(4)); QString description = m_regExpLinker.cap(8).trimmed(); Task task(Task::Error, description, filename, lineno, Core::Id(Constants::TASK_CATEGORY_COMPILE)); if (description.startsWith(QLatin1String("At global scope")) || description.startsWith(QLatin1String("At top level")) || description.startsWith(QLatin1String("instantiated from ")) || description.startsWith(QLatin1String("In "))) task.type = Task::Unknown; if (description.startsWith(QLatin1String("warning: "), Qt::CaseInsensitive)) { task.type = Task::Warning; task.description = description.mid(9); } emit addTask(task); return; } IOutputParser::stdError(line); }<|fim▁end|>
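The parser above pulls a file name, a position and a message out of linker output with regular expressions. A much smaller Python illustration of the same idea (the pattern here is deliberately simplified and is not one of the Qt regexes used in the file):

import re

LINKER_LINE = re.compile(r'^(?P<file>(?:[A-Za-z]:)?[^:]+\.[^:]+):(?P<line>\d+):\s*(?P<msg>.+)$')

sample = "main.o:42: undefined reference to 'foo()'"
m = LINKER_LINE.match(sample)
if m:
    print(m.group('file'), m.group('line'), m.group('msg'))
# main.o 42 undefined reference to 'foo()'
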
<|file_name|>pages_tags.py<|end_file_name|><|fim▁begin|>from django import template from django.template.loader_tags import BaseIncludeNode from django.template import Template from django.conf import settings from pages.plugins import html_to_template_text, SearchBoxNode from pages.plugins import LinkNode, EmbedCodeNode from pages import models from django.utils.text import unescape_string_literal from pages.models import Page, slugify from django.core.urlresolvers import reverse register = template.Library() @register.filter def name_to_url(value): return models.name_to_url(value) name_to_url.is_safe = True <|fim▁hole|>class PageContentNode(BaseIncludeNode): def __init__(self, html_var, render_plugins=True, *args, **kwargs): super(PageContentNode, self).__init__(*args, **kwargs) self.html_var = template.Variable(html_var) self.render_plugins = render_plugins def render(self, context): try: html = unicode(self.html_var.resolve(context)) t = Template(html_to_template_text(html, context, self.render_plugins)) return self.render_template(t, context) except: if settings.TEMPLATE_DEBUG: raise return '' class IncludeContentNode(BaseIncludeNode): """ Base class for including some named content inside a other content. Subclass and override get_content() and get_title() to return HTML or None. The name of the content to include is stored in self.name All other parameters are stored in self.args, without quotes (if any). """ def __init__(self, parser, token, *args, **kwargs): super(IncludeContentNode, self).__init__(*args, **kwargs) bits = token.split_contents() if len(bits) < 2: raise template.TemplateSyntaxError, ('%r tag requires at least one' ' argument' % token.contents.split()[0]) self.args = [] for b in bits[1:]: if is_quoted(b): b = unescape_string_literal(b) self.args.append(b) self.name = self.args.pop(0) def get_content(self, context): """ Override this to return content to be included. """ return None def get_title(self, context): """ Override this to return a title or None to omit it. 
""" return self.name def render(self, context): try: template_text = '' if 'showtitle' in self.args: title = self.get_title(context) if title: template_text += '<h2>%s</h2>' % title template_text += self.get_content(context) template = Template(template_text) return self.render_template(template, context) except: if settings.TEMPLATE_DEBUG: raise return '' class IncludePageNode(IncludeContentNode): def __init__(self, *args, **kwargs): super(IncludePageNode, self).__init__(*args, **kwargs) try: self.page = Page.objects.get(slug__exact=slugify(self.name)) except Page.DoesNotExist: self.page = None def get_title(self, context): if not self.page: return None return ('<a href="%s">%s</a>' % (self.get_page_url(), self.page.name)) def get_page_url(self): if self.page: slug = self.page.pretty_slug else: slug = name_to_url(self.name) return reverse('pages:show', args=[slug]) def get_content(self, context): if not self.page: return ('<p class="plugin includepage">Unable to include ' '<a href="%s" class="missing_link">%s</a></p>' % (self.get_page_url(), self.name)) # prevent endless loops context_page = context['page'] include_stack = context.get('_include_stack', []) include_stack.append(context_page.name) if self.page.name in include_stack: return ('<p class="plugin includepage">Unable to' ' include <a href="%s">%s</a>: endless include' ' loop.</p>' % (self.get_page_url(), self.page.name)) context['_include_stack'] = include_stack context['page'] = self.page template_text = html_to_template_text(self.page.content, context) # restore context context['_include_stack'].pop() context['page'] = context_page return template_text @register.tag(name='render_plugins') def do_render_plugins(parser, token, render_plugins=True): """ Render tags and plugins """ try: tag, html_var = token.split_contents() except ValueError: raise template.TemplateSyntaxError, ("%r tag requires one argument" % token.contents.split()[0]) return PageContentNode(html_var, render_plugins) @register.tag(name='render_tags') def do_render_tags(parser, token): """ Render tags only, does not render plugins """ return do_render_plugins(parser, token, render_plugins=False) @register.tag(name='include_page') def do_include_page(parser, token): return IncludePageNode(parser, token) def is_quoted(text): return text[0] == text[-1] and text[0] in ('"', "'") @register.tag(name='embed_code') def do_embed_code(parser, token): nodelist = parser.parse(('endembed_code',)) parser.delete_first_token() return EmbedCodeNode(nodelist) @register.tag(name='searchbox') def do_searchbox(parser, token): try: tag, query = token.split_contents() except ValueError: raise template.TemplateSyntaxError('%r tag requires one argument' % token.contents.split()[0]) if not is_quoted(query): raise template.TemplateSyntaxError( "%r tag's argument should be in quotes" % token.contents.split()[0]) return SearchBoxNode(query=unescape_string_literal(query)) @register.tag(name='link') def do_link(parser, token): try: tag, href = token.split_contents() except ValueError: raise template.TemplateSyntaxError("%r tag requires one argument" % token.contents.split()[0]) if not is_quoted(href): raise template.TemplateSyntaxError( "%r tag's argument should be in quotes" % token.contents.split()[0]) nodelist = parser.parse(('endlink',)) parser.delete_first_token() return LinkNode(unescape_string_literal(href), nodelist)<|fim▁end|>
<|file_name|>plugin.js<|end_file_name|><|fim▁begin|>/* Copyright (c) 2003-2011, CKSource - Frederico Knabben. All rights reserved. For licensing, see LICENSE.html or http://ckeditor.com/license */ CKEDITOR.plugins.add('resize', { init:function (editor) { var config = editor.config; // Resize in the same direction of chrome, // which is identical to dir of editor element. (#6614) var resizeDir = editor.element.getDirection(1); !config.resize_dir && ( config.resize_dir = 'both' ); ( config.resize_maxWidth == undefined ) && ( config.resize_maxWidth = 3000 ); ( config.resize_maxHeight == undefined ) && ( config.resize_maxHeight = 3000 ); ( config.resize_minWidth == undefined ) && ( config.resize_minWidth = 750 ); ( config.resize_minHeight == undefined ) && ( config.resize_minHeight = 250 ); if (config.resize_enabled !== false) { var container = null, origin, startSize, resizeHorizontal = ( config.resize_dir == 'both' || config.resize_dir == 'horizontal' ) && ( config.resize_minWidth != config.resize_maxWidth ), resizeVertical = ( config.resize_dir == 'both' || config.resize_dir == 'vertical' ) && ( config.resize_minHeight != config.resize_maxHeight ); function dragHandler(evt) { var dx = evt.data.$.screenX - origin.x, dy = evt.data.$.screenY - origin.y, width = startSize.width, height = startSize.height, internalWidth = width + dx * ( resizeDir == 'rtl' ? -1 : 1 ), internalHeight = height + dy; if (resizeHorizontal) { width = Math.max(config.resize_minWidth, Math.min(internalWidth, config.resize_maxWidth)); } if (resizeVertical) { height = Math.max(config.resize_minHeight, Math.min(internalHeight, config.resize_maxHeight)); } editor.resize(width, height); } function dragEndHandler(evt) { CKEDITOR.document.removeListener('mousemove', dragHandler); CKEDITOR.document.removeListener('mouseup', dragEndHandler); if (editor.document) {<|fim▁hole|> } var mouseDownFn = CKEDITOR.tools.addFunction(function ($event) { if (!container) { container = editor.getResizable(); } startSize = { width:container.$.offsetWidth || 0, height:container.$.offsetHeight || 0 }; origin = { x:$event.screenX, y:$event.screenY }; config.resize_minWidth > startSize.width && ( config.resize_minWidth = startSize.width ); config.resize_minHeight > startSize.height && ( config.resize_minHeight = startSize.height ); CKEDITOR.document.on('mousemove', dragHandler); CKEDITOR.document.on('mouseup', dragEndHandler); if (editor.document) { editor.document.on('mousemove', dragHandler); editor.document.on('mouseup', dragEndHandler); } }); editor.on('destroy', function () { CKEDITOR.tools.removeFunction(mouseDownFn); }); editor.on('themeSpace', function (event) { if (event.data.space == 'bottom') { var direction = ''; if (resizeHorizontal && !resizeVertical) { direction = ' cke_resizer_horizontal'; } if (!resizeHorizontal && resizeVertical) { direction = ' cke_resizer_vertical'; } var resizerHtml = '<div' + ' class="cke_resizer' + direction + ' cke_resizer_' + resizeDir + '"' + ' title="' + CKEDITOR.tools.htmlEncode(editor.lang.resize) + '"' + ' onmousedown="CKEDITOR.tools.callFunction(' + mouseDownFn + ', event)"' + '></div>'; // Always sticks the corner of botttom space. resizeDir == 'ltr' && direction == 'ltr' ? event.data.html += resizerHtml : event.data.html = resizerHtml + event.data.html; } }, editor, null, 100); } } }); /** * The minimum editor width, in pixels, when resizing the editor interface by using the resize handle. * Note: It falls back to editor's actual width if it is smaller than the default value. 
* @name CKEDITOR.config.resize_minWidth * @type Number * @default 750 * @example * config.resize_minWidth = 500; */ /** * The minimum editor height, in pixels, when resizing the editor interface by using the resize handle. * Note: It falls back to editor's actual height if it is smaller than the default value. * @name CKEDITOR.config.resize_minHeight * @type Number * @default 250 * @example * config.resize_minHeight = 600; */ /** * The maximum editor width, in pixels, when resizing the editor interface by using the resize handle. * @name CKEDITOR.config.resize_maxWidth * @type Number * @default 3000 * @example * config.resize_maxWidth = 750; */ /** * The maximum editor height, in pixels, when resizing the editor interface by using the resize handle. * @name CKEDITOR.config.resize_maxHeight * @type Number * @default 3000 * @example * config.resize_maxHeight = 600; */ /** * Whether to enable the resizing feature. If this feature is disabled, the resize handle will not be visible. * @name CKEDITOR.config.resize_enabled * @type Boolean * @default true * @example * config.resize_enabled = false; */ /** * The dimensions for which the editor resizing is enabled. Possible values * are <code>both</code>, <code>vertical</code>, and <code>horizontal</code>. * @name CKEDITOR.config.resize_dir * @type String * @default 'both' * @since 3.3 * @example * config.resize_dir = 'vertical'; */<|fim▁end|>
editor.document.removeListener('mousemove', dragHandler); editor.document.removeListener('mouseup', dragEndHandler); }
<|file_name|>random_distributed_scalar_test.py<|end_file_name|><|fim▁begin|># ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- from cStringIO import StringIO import sys import tempfile import unittest2 as unittest import numpy from nupic.encoders.base import defaultDtype from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA from nupic.data.fieldmeta import FieldMetaType from nupic.support.unittesthelpers.algorithm_test_helpers import getSeed from nupic.encoders.random_distributed_scalar import ( RandomDistributedScalarEncoder ) try: import capnp except ImportError: capnp = None if capnp: from nupic.encoders.random_distributed_scalar_capnp import ( RandomDistributedScalarEncoderProto ) # Disable warnings about accessing protected members # pylint: disable=W0212 def computeOverlap(x, y): """ Given two binary arrays, compute their overlap. The overlap is the number of bits where x[i] and y[i] are both 1 """ return (x & y).sum() def validateEncoder(encoder, subsampling): """ Given an encoder, calculate overlaps statistics and ensure everything is ok. We don't check every possible combination for speed reasons. """ for i in range(encoder.minIndex, encoder.maxIndex+1, 1): for j in range(i+1, encoder.maxIndex+1, subsampling): if not encoder._overlapOK(i, j): return False return True class RandomDistributedScalarEncoderTest(unittest.TestCase): """ Unit tests for RandomDistributedScalarEncoder class. """ def testEncoding(self): """ Test basic encoding functionality. Create encodings without crashing and check they contain the correct number of on and off bits. Check some encodings for expected overlap. Test that encodings for old values don't change once we generate new buckets. """ # Initialize with non-default parameters and encode with a number close to # the offset encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0, w=23, n=500, offset=0.0) e0 = encoder.encode(-0.1) self.assertEqual(e0.sum(), 23, "Number of on bits is incorrect") self.assertEqual(e0.size, 500, "Width of the vector is incorrect") self.assertEqual(encoder.getBucketIndices(0.0)[0], encoder._maxBuckets / 2, "Offset doesn't correspond to middle bucket") self.assertEqual(len(encoder.bucketMap), 1, "Number of buckets is not 1") # Encode with a number that is resolution away from offset. 
Now we should # have two buckets and this encoding should be one bit away from e0 e1 = encoder.encode(1.0) self.assertEqual(len(encoder.bucketMap), 2, "Number of buckets is not 2") self.assertEqual(e1.sum(), 23, "Number of on bits is incorrect") self.assertEqual(e1.size, 500, "Width of the vector is incorrect") self.assertEqual(computeOverlap(e0, e1), 22, "Overlap is not equal to w-1") # Encode with a number that is resolution*w away from offset. Now we should # have many buckets and this encoding should have very little overlap with # e0 e25 = encoder.encode(25.0) self.assertGreater(len(encoder.bucketMap), 23, "Number of buckets is not 2") self.assertEqual(e25.sum(), 23, "Number of on bits is incorrect") self.assertEqual(e25.size, 500, "Width of the vector is incorrect") self.assertLess(computeOverlap(e0, e25), 4, "Overlap is too high") # Test encoding consistency. The encodings for previous numbers # shouldn't change even though we have added additional buckets self.assertTrue(numpy.array_equal(e0, encoder.encode(-0.1)), "Encodings are not consistent - they have changed after new buckets " "have been created") self.assertTrue(numpy.array_equal(e1, encoder.encode(1.0)), "Encodings are not consistent - they have changed after new buckets " "have been created") def testMissingValues(self): """ Test that missing values and NaN return all zero's. """ encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0) empty = encoder.encode(SENTINEL_VALUE_FOR_MISSING_DATA) self.assertEqual(empty.sum(), 0) empty = encoder.encode(float("nan")) self.assertEqual(empty.sum(), 0) def testResolution(self): """ Test that numbers within the same resolution return the same encoding. Numbers outside the resolution should return different encodings. """ encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0) # Since 23.0 is the first encoded number, it will be the offset. # Since resolution is 1, 22.9 and 23.4 should have the same bucket index and # encoding. e23 = encoder.encode(23.0) e23p1 = encoder.encode(23.1) e22p9 = encoder.encode(22.9) e24 = encoder.encode(24.0) self.assertEqual(e23.sum(), encoder.w) self.assertEqual((e23 == e23p1).sum(), encoder.getWidth(), "Numbers within resolution don't have the same encoding") self.assertEqual((e23 == e22p9).sum(), encoder.getWidth(), "Numbers within resolution don't have the same encoding") self.assertNotEqual((e23 == e24).sum(), encoder.getWidth(), "Numbers outside resolution have the same encoding") e22p9 = encoder.encode(22.5) self.assertNotEqual((e23 == e22p9).sum(), encoder.getWidth(), "Numbers outside resolution have the same encoding") def testMapBucketIndexToNonZeroBits(self): """ Test that mapBucketIndexToNonZeroBits works and that max buckets and clipping are handled properly. 
""" encoder = RandomDistributedScalarEncoder(resolution=1.0, w=11, n=150) # Set a low number of max buckets encoder._initializeBucketMap(10, None) encoder.encode(0.0) encoder.encode(-7.0) encoder.encode(7.0) self.assertEqual(len(encoder.bucketMap), encoder._maxBuckets, "_maxBuckets exceeded") self.assertTrue( numpy.array_equal(encoder.mapBucketIndexToNonZeroBits(-1), encoder.bucketMap[0]), "mapBucketIndexToNonZeroBits did not handle negative" " index") self.assertTrue( numpy.array_equal(encoder.mapBucketIndexToNonZeroBits(1000), encoder.bucketMap[9]), "mapBucketIndexToNonZeroBits did not handle negative index") e23 = encoder.encode(23.0) e6 = encoder.encode(6) self.assertEqual((e23 == e6).sum(), encoder.getWidth(), "Values not clipped correctly during encoding") ep8 = encoder.encode(-8) ep7 = encoder.encode(-7) self.assertEqual((ep8 == ep7).sum(), encoder.getWidth(), "Values not clipped correctly during encoding") self.assertEqual(encoder.getBucketIndices(-8)[0], 0, "getBucketIndices returned negative bucket index") self.assertEqual(encoder.getBucketIndices(23)[0], encoder._maxBuckets-1, "getBucketIndices returned bucket index that is too" " large") def testParameterChecks(self): """ Test that some bad construction parameters get handled. """ # n must be >= 6*w with self.assertRaises(ValueError): RandomDistributedScalarEncoder(name="mv", resolution=1.0, n=int(5.9*21)) # n must be an int with self.assertRaises(ValueError):<|fim▁hole|> with self.assertRaises(ValueError): RandomDistributedScalarEncoder(name="mv", resolution=1.0, w=-1) # resolution can't be negative with self.assertRaises(ValueError): RandomDistributedScalarEncoder(name="mv", resolution=-2) def testOverlapStatistics(self): """ Check that the overlaps for the encodings are within the expected range. Here we ask the encoder to create a bunch of representations under somewhat stressful conditions, and then verify they are correct. We rely on the fact that the _overlapOK and _countOverlapIndices methods are working correctly. """ seed = getSeed() # Generate about 600 encodings. Set n relatively low to increase # chance of false overlaps encoder = RandomDistributedScalarEncoder(resolution=1.0, w=11, n=150, seed=seed) encoder.encode(0.0) encoder.encode(-300.0) encoder.encode(300.0) self.assertTrue(validateEncoder(encoder, subsampling=3), "Illegal overlap encountered in encoder") def testGetMethods(self): """ Test that the getWidth, getDescription, and getDecoderOutputFieldTypes methods work. 
""" encoder = RandomDistributedScalarEncoder(name="theName", resolution=1.0, n=500) self.assertEqual(encoder.getWidth(), 500, "getWidth doesn't return the correct result") self.assertEqual(encoder.getDescription(), [("theName", 0)], "getDescription doesn't return the correct result") self.assertEqual(encoder.getDecoderOutputFieldTypes(), (FieldMetaType.float, ), "getDecoderOutputFieldTypes doesn't return the correct" " result") def testOffset(self): """ Test that offset is working properly """ encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0) encoder.encode(23.0) self.assertEqual(encoder._offset, 23.0, "Offset not specified and not initialized to first input") encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0, offset=25.0) encoder.encode(23.0) self.assertEqual(encoder._offset, 25.0, "Offset not initialized to specified constructor" " parameter") def testSeed(self): """ Test that initializing twice with the same seed returns identical encodings and different when not specified """ encoder1 = RandomDistributedScalarEncoder(name="encoder1", resolution=1.0, seed=42) encoder2 = RandomDistributedScalarEncoder(name="encoder2", resolution=1.0, seed=42) encoder3 = RandomDistributedScalarEncoder(name="encoder3", resolution=1.0, seed=-1) encoder4 = RandomDistributedScalarEncoder(name="encoder4", resolution=1.0, seed=-1) e1 = encoder1.encode(23.0) e2 = encoder2.encode(23.0) e3 = encoder3.encode(23.0) e4 = encoder4.encode(23.0) self.assertEqual((e1 == e2).sum(), encoder1.getWidth(), "Same seed gives rise to different encodings") self.assertNotEqual((e1 == e3).sum(), encoder1.getWidth(), "Different seeds gives rise to same encodings") self.assertNotEqual((e3 == e4).sum(), encoder1.getWidth(), "seeds of -1 give rise to same encodings") def testCountOverlapIndices(self): """ Test that the internal method _countOverlapIndices works as expected. """ # Create a fake set of encodings. encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0, w=5, n=5*20) midIdx = encoder._maxBuckets/2 encoder.bucketMap[midIdx-2] = numpy.array(range(3, 8)) encoder.bucketMap[midIdx-1] = numpy.array(range(4, 9)) encoder.bucketMap[midIdx] = numpy.array(range(5, 10)) encoder.bucketMap[midIdx+1] = numpy.array(range(6, 11)) encoder.bucketMap[midIdx+2] = numpy.array(range(7, 12)) encoder.bucketMap[midIdx+3] = numpy.array(range(8, 13)) encoder.minIndex = midIdx - 2 encoder.maxIndex = midIdx + 3 # Indices must exist with self.assertRaises(ValueError): encoder._countOverlapIndices(midIdx-3, midIdx-2) with self.assertRaises(ValueError): encoder._countOverlapIndices(midIdx-2, midIdx-3) # Test some overlaps self.assertEqual(encoder._countOverlapIndices(midIdx-2, midIdx-2), 5, "_countOverlapIndices didn't work") self.assertEqual(encoder._countOverlapIndices(midIdx-1, midIdx-2), 4, "_countOverlapIndices didn't work") self.assertEqual(encoder._countOverlapIndices(midIdx+1, midIdx-2), 2, "_countOverlapIndices didn't work") self.assertEqual(encoder._countOverlapIndices(midIdx-2, midIdx+3), 0, "_countOverlapIndices didn't work") def testOverlapOK(self): """ Test that the internal method _overlapOK works as expected. """ # Create a fake set of encodings. 
encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0, w=5, n=5*20) midIdx = encoder._maxBuckets/2 encoder.bucketMap[midIdx-3] = numpy.array(range(4, 9)) # Not ok with # midIdx-1 encoder.bucketMap[midIdx-2] = numpy.array(range(3, 8)) encoder.bucketMap[midIdx-1] = numpy.array(range(4, 9)) encoder.bucketMap[midIdx] = numpy.array(range(5, 10)) encoder.bucketMap[midIdx+1] = numpy.array(range(6, 11)) encoder.bucketMap[midIdx+2] = numpy.array(range(7, 12)) encoder.bucketMap[midIdx+3] = numpy.array(range(8, 13)) encoder.minIndex = midIdx - 3 encoder.maxIndex = midIdx + 3 self.assertTrue(encoder._overlapOK(midIdx, midIdx-1), "_overlapOK didn't work") self.assertTrue(encoder._overlapOK(midIdx-2, midIdx+3), "_overlapOK didn't work") self.assertFalse(encoder._overlapOK(midIdx-3, midIdx-1), "_overlapOK didn't work") # We'll just use our own numbers self.assertTrue(encoder._overlapOK(100, 50, 0), "_overlapOK didn't work for far values") self.assertTrue(encoder._overlapOK(100, 50, encoder._maxOverlap), "_overlapOK didn't work for far values") self.assertFalse(encoder._overlapOK(100, 50, encoder._maxOverlap+1), "_overlapOK didn't work for far values") self.assertTrue(encoder._overlapOK(50, 50, 5), "_overlapOK didn't work for near values") self.assertTrue(encoder._overlapOK(48, 50, 3), "_overlapOK didn't work for near values") self.assertTrue(encoder._overlapOK(46, 50, 1), "_overlapOK didn't work for near values") self.assertTrue(encoder._overlapOK(45, 50, encoder._maxOverlap), "_overlapOK didn't work for near values") self.assertFalse(encoder._overlapOK(48, 50, 4), "_overlapOK didn't work for near values") self.assertFalse(encoder._overlapOK(48, 50, 2), "_overlapOK didn't work for near values") self.assertFalse(encoder._overlapOK(46, 50, 2), "_overlapOK didn't work for near values") self.assertFalse(encoder._overlapOK(50, 50, 6), "_overlapOK didn't work for near values") def testCountOverlap(self): """ Test that the internal method _countOverlap works as expected. 
""" encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0, n=500) r1 = numpy.array([1, 2, 3, 4, 5, 6]) r2 = numpy.array([1, 2, 3, 4, 5, 6]) self.assertEqual(encoder._countOverlap(r1, r2), 6, "_countOverlap result is incorrect") r1 = numpy.array([1, 2, 3, 4, 5, 6]) r2 = numpy.array([1, 2, 3, 4, 5, 7]) self.assertEqual(encoder._countOverlap(r1, r2), 5, "_countOverlap result is incorrect") r1 = numpy.array([1, 2, 3, 4, 5, 6]) r2 = numpy.array([6, 5, 4, 3, 2, 1]) self.assertEqual(encoder._countOverlap(r1, r2), 6, "_countOverlap result is incorrect") r1 = numpy.array([1, 2, 8, 4, 5, 6]) r2 = numpy.array([1, 2, 3, 4, 9, 6]) self.assertEqual(encoder._countOverlap(r1, r2), 4, "_countOverlap result is incorrect") r1 = numpy.array([1, 2, 3, 4, 5, 6]) r2 = numpy.array([1, 2, 3]) self.assertEqual(encoder._countOverlap(r1, r2), 3, "_countOverlap result is incorrect") r1 = numpy.array([7, 8, 9, 10, 11, 12]) r2 = numpy.array([1, 2, 3, 4, 5, 6]) self.assertEqual(encoder._countOverlap(r1, r2), 0, "_countOverlap result is incorrect") def testVerbosity(self): """ Test that nothing is printed out when verbosity=0 """ _stdout = sys.stdout sys.stdout = _stringio = StringIO() encoder = RandomDistributedScalarEncoder(name="mv", resolution=1.0, verbosity=0) output = numpy.zeros(encoder.getWidth(), dtype=defaultDtype) encoder.encodeIntoArray(23.0, output) encoder.getBucketIndices(23.0) sys.stdout = _stdout self.assertEqual(len(_stringio.getvalue()), 0, "zero verbosity doesn't lead to zero output") def testEncodeInvalidInputType(self): encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0, verbosity=0) with self.assertRaises(TypeError): encoder.encode("String") @unittest.skipUnless( capnp, "pycapnp is not installed, skipping serialization test.") def testWriteRead(self): original = RandomDistributedScalarEncoder( name="encoder", resolution=1.0, w=23, n=500, offset=0.0) originalValue = original.encode(1) proto1 = RandomDistributedScalarEncoderProto.new_message() original.write(proto1) # Write the proto to a temp file and read it back into a new proto with tempfile.TemporaryFile() as f: proto1.write(f) f.seek(0) proto2 = RandomDistributedScalarEncoderProto.read(f) encoder = RandomDistributedScalarEncoder.read(proto2) self.assertIsInstance(encoder, RandomDistributedScalarEncoder) self.assertEqual(encoder.resolution, original.resolution) self.assertEqual(encoder.w, original.w) self.assertEqual(encoder.n, original.n) self.assertEqual(encoder.name, original.name) self.assertEqual(encoder.verbosity, original.verbosity) self.assertEqual(encoder.minIndex, original.minIndex) self.assertEqual(encoder.maxIndex, original.maxIndex) encodedFromOriginal = original.encode(1) encodedFromNew = encoder.encode(1) self.assertTrue(numpy.array_equal(encodedFromNew, originalValue)) self.assertEqual(original.decode(encodedFromNew), encoder.decode(encodedFromOriginal)) self.assertEqual(original.random.getSeed(), encoder.random.getSeed()) for key, value in original.bucketMap.items(): self.assertTrue(numpy.array_equal(value, encoder.bucketMap[key])) if __name__ == "__main__": unittest.main()<|fim▁end|>
RandomDistributedScalarEncoder(name="mv", resolution=1.0, n=5.9*21) # w can't be negative
<|file_name|>main.py<|end_file_name|><|fim▁begin|>from kivy.lib import osc from time import sleep import pocketclient from kivy.utils import platform as kivy_platform SERVICE_PORT = 4000 def platform(): p = kivy_platform() if p.lower() in ('linux', 'waindows', 'osx'): return 'desktop' else: return p class Service(object): def __init__(self): osc.init() self.last_update = 0 self.oscid = osc.listen(ipAddr='localhost', port=SERVICE_PORT) osc.bind(self.oscid, self.pocket_connect, '/pocket/connect') osc.bind(self.oscid, self.pocket_list, '/pocket/list') osc.bind(self.oscid, self.pocket_mark_read, '/pocket/mark_read') def send(self, **kwargs): osc.sendMsg() def run(self): while self._run: osc.readQueue(self.oscid) sleep(.1) def pocket_connect(self, **kwargs): if 'token' in kwargs: self.token = kwargs['token'] else: pocketclient.authorize(platform(), self.save_pocket_token) def save_pocket_token(self, api_key, token, username): self.token = { 'key': api_key, 'token': token, 'username': username } def pocket_list(self, *args): if not self.token: if self.pocket_last_update: pocketclient.get_items(self.<|fim▁hole|> pass pass def pocket_mark_read(self, *args): pass if __name__ == '__main__': Service().run()<|fim▁end|>
else:
<|file_name|>yaml_load.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python """ A basic example of loading YAML Make sure you use the "safe_load" method and not the "load" method that will give you warnings. References: - https://stackoverflow.com/questions/1773805/how-can-i-parse-a-yaml-file-in-python """ import yaml with open("data_samples/basic.yaml", 'r') as stream: try: data=yaml.safe_load(stream) assert "concepts" in data<|fim▁hole|> print(exc)<|fim▁end|>
except yaml.YAMLError as exc:
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// Serkr - An automated theorem prover. Copyright (C) 2015-2016 Mikko Aarnos. // // Serkr is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // Serkr is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with Serkr. If not, see <http://www.gnu.org/licenses/>. // //! Contains the parser for the TPTP format. /// Autogenerated code, don't care about any warnings. #[allow(missing_docs)] #[allow(dead_code)]<|fim▁hole|>#[cfg_attr(feature="clippy", allow(clippy))] #[cfg_attr(feature="clippy", allow(clippy_pedantic))] mod parser_grammar; /// Contains a function for parsing a formula in the TPTP format to the abstract syntax tree. #[cfg_attr(rustfmt, rustfmt_skip)] #[cfg_attr(feature="clippy", allow(result_unwrap_used))] pub mod parser; /// The abstract syntax tree the parser constructs. pub mod ast;<|fim▁end|>
#[cfg_attr(rustfmt, rustfmt_skip)]
<|file_name|>bfloat16_test.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test cases for the bfloat16 Python type.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import itertools import math from absl.testing import absltest from absl.testing import parameterized import numpy as np # pylint: disable=unused-import,g-bad-import-order from tensorflow.python.framework import dtypes from tensorflow.python.lib.core import _pywrap_bfloat16 from tensorflow.python.platform import test bfloat16 = _pywrap_bfloat16.TF_bfloat16_type() def numpy_assert_allclose(a, b, **kwargs): a = a.astype(np.float32) if a.dtype == bfloat16 else a b = b.astype(np.float32) if b.dtype == bfloat16 else b return np.testing.assert_allclose(a, b, **kwargs) epsilon = float.fromhex("1.0p-7") # Values that should round trip exactly to float and back. FLOAT_VALUES = [ 0.0, 1.0, -1, 0.5, -0.5, epsilon, 1.0 + epsilon, 1.0 - epsilon, -1.0 - epsilon, -1.0 + epsilon, 3.5, 42.0, 255.0, 256.0, float("inf"), float("-inf"), float("nan") ] class Bfloat16Test(parameterized.TestCase): """Tests the non-numpy Python methods of the bfloat16 type.""" def testRoundTripToFloat(self): for v in FLOAT_VALUES: np.testing.assert_equal(v, float(bfloat16(v))) def testRoundTripNumpyTypes(self): for dtype in [np.float16, np.float32, np.float64]: np.testing.assert_equal(-3.75, dtype(bfloat16(dtype(-3.75)))) np.testing.assert_equal(1.5, float(bfloat16(dtype(1.5)))) np.testing.assert_equal(4.5, dtype(bfloat16(np.array(4.5, dtype)))) np.testing.assert_equal( np.array([2, 5, -1], bfloat16), bfloat16(np.array([2, 5, -1], dtype))) def testRoundTripToInt(self): for v in [-256, -255, -34, -2, -1, 0, 1, 2, 10, 47, 128, 255, 256, 512]: self.assertEqual(v, int(bfloat16(v))) # pylint: disable=g-complex-comprehension @parameterized.named_parameters(({ "testcase_name": "_" + dtype.__name__, "dtype": dtype } for dtype in [bfloat16, np.float16, np.float32, np.float64])) def testRoundTripToNumpy(self, dtype): for v in FLOAT_VALUES: np.testing.assert_equal(v, bfloat16(dtype(v))) np.testing.assert_equal(v, dtype(bfloat16(dtype(v)))) np.testing.assert_equal(v, dtype(bfloat16(np.array(v, dtype))))<|fim▁hole|> np.testing.assert_equal( np.array(FLOAT_VALUES, dtype), bfloat16(np.array(FLOAT_VALUES, dtype)).astype(dtype)) def testStr(self): self.assertEqual("0", str(bfloat16(0.0))) self.assertEqual("1", str(bfloat16(1.0))) self.assertEqual("-3.5", str(bfloat16(-3.5))) self.assertEqual("0.0078125", str(bfloat16(float.fromhex("1.0p-7")))) self.assertEqual("inf", str(bfloat16(float("inf")))) self.assertEqual("-inf", str(bfloat16(float("-inf")))) self.assertEqual("nan", str(bfloat16(float("nan")))) def testRepr(self): self.assertEqual("0", repr(bfloat16(0))) self.assertEqual("1", repr(bfloat16(1))) self.assertEqual("-3.5", 
repr(bfloat16(-3.5))) self.assertEqual("0.0078125", repr(bfloat16(float.fromhex("1.0p-7")))) self.assertEqual("inf", repr(bfloat16(float("inf")))) self.assertEqual("-inf", repr(bfloat16(float("-inf")))) self.assertEqual("nan", repr(bfloat16(float("nan")))) def testHash(self): self.assertEqual(0, hash(bfloat16(0.0))) self.assertEqual(0x3f80, hash(bfloat16(1.0))) self.assertEqual(0x7fc0, hash(bfloat16(float("nan")))) # Tests for Python operations def testNegate(self): for v in FLOAT_VALUES: np.testing.assert_equal(-v, float(-bfloat16(v))) def testAdd(self): np.testing.assert_equal(0, float(bfloat16(0) + bfloat16(0))) np.testing.assert_equal(1, float(bfloat16(1) + bfloat16(0))) np.testing.assert_equal(0, float(bfloat16(1) + bfloat16(-1))) np.testing.assert_equal(5.5, float(bfloat16(2) + bfloat16(3.5))) np.testing.assert_equal(1.25, float(bfloat16(3.5) + bfloat16(-2.25))) np.testing.assert_equal( float("inf"), float(bfloat16(float("inf")) + bfloat16(-2.25))) np.testing.assert_equal( float("-inf"), float(bfloat16(float("-inf")) + bfloat16(-2.25))) self.assertTrue(math.isnan(float(bfloat16(3.5) + bfloat16(float("nan"))))) # Test type promotion against Numpy scalar values. self.assertEqual(np.float32, type(bfloat16(3.5) + np.float16(2.25))) self.assertEqual(np.float32, type(np.float16(3.5) + bfloat16(2.25))) self.assertEqual(np.float32, type(bfloat16(3.5) + np.float32(2.25))) self.assertEqual(np.float32, type(np.float32(3.5) + bfloat16(2.25))) self.assertEqual(np.float64, type(bfloat16(3.5) + np.float64(2.25))) self.assertEqual(np.float64, type(np.float64(3.5) + bfloat16(2.25))) self.assertEqual(np.float64, type(bfloat16(3.5) + float(2.25))) self.assertEqual(np.float64, type(float(3.5) + bfloat16(2.25))) self.assertEqual(np.float32, type(bfloat16(3.5) + np.array(2.25, np.float32))) self.assertEqual(np.float32, type(np.array(3.5, np.float32) + bfloat16(2.25))) def testSub(self): np.testing.assert_equal(0, float(bfloat16(0) - bfloat16(0))) np.testing.assert_equal(1, float(bfloat16(1) - bfloat16(0))) np.testing.assert_equal(2, float(bfloat16(1) - bfloat16(-1))) np.testing.assert_equal(-1.5, float(bfloat16(2) - bfloat16(3.5))) np.testing.assert_equal(5.75, float(bfloat16(3.5) - bfloat16(-2.25))) np.testing.assert_equal( float("-inf"), float(bfloat16(-2.25) - bfloat16(float("inf")))) np.testing.assert_equal( float("inf"), float(bfloat16(-2.25) - bfloat16(float("-inf")))) self.assertTrue(math.isnan(float(bfloat16(3.5) - bfloat16(float("nan"))))) def testMul(self): np.testing.assert_equal(0, float(bfloat16(0) * bfloat16(0))) np.testing.assert_equal(0, float(bfloat16(1) * bfloat16(0))) np.testing.assert_equal(-1, float(bfloat16(1) * bfloat16(-1))) np.testing.assert_equal(-7.875, float(bfloat16(3.5) * bfloat16(-2.25))) np.testing.assert_equal( float("-inf"), float(bfloat16(float("inf")) * bfloat16(-2.25))) np.testing.assert_equal( float("inf"), float(bfloat16(float("-inf")) * bfloat16(-2.25))) self.assertTrue(math.isnan(float(bfloat16(3.5) * bfloat16(float("nan"))))) def testDiv(self): self.assertTrue(math.isnan(float(bfloat16(0) / bfloat16(0)))) np.testing.assert_equal(float("inf"), float(bfloat16(1) / bfloat16(0))) np.testing.assert_equal(-1, float(bfloat16(1) / bfloat16(-1))) np.testing.assert_equal(-1.75, float(bfloat16(3.5) / bfloat16(-2))) np.testing.assert_equal( float("-inf"), float(bfloat16(float("inf")) / bfloat16(-2.25))) np.testing.assert_equal( float("inf"), float(bfloat16(float("-inf")) / bfloat16(-2.25))) self.assertTrue(math.isnan(float(bfloat16(3.5) / bfloat16(float("nan"))))) def 
testLess(self): for v in FLOAT_VALUES: for w in FLOAT_VALUES: self.assertEqual(v < w, bfloat16(v) < bfloat16(w)) def testLessEqual(self): for v in FLOAT_VALUES: for w in FLOAT_VALUES: self.assertEqual(v <= w, bfloat16(v) <= bfloat16(w)) def testGreater(self): for v in FLOAT_VALUES: for w in FLOAT_VALUES: self.assertEqual(v > w, bfloat16(v) > bfloat16(w)) def testGreaterEqual(self): for v in FLOAT_VALUES: for w in FLOAT_VALUES: self.assertEqual(v >= w, bfloat16(v) >= bfloat16(w)) def testEqual(self): for v in FLOAT_VALUES: for w in FLOAT_VALUES: self.assertEqual(v == w, bfloat16(v) == bfloat16(w)) def testNotEqual(self): for v in FLOAT_VALUES: for w in FLOAT_VALUES: self.assertEqual(v != w, bfloat16(v) != bfloat16(w)) def testNan(self): a = np.isnan(bfloat16(float("nan"))) self.assertTrue(a) numpy_assert_allclose(np.array([1.0, a]), np.array([1.0, a])) a = np.array([bfloat16(1.34375), bfloat16(1.4375), bfloat16(float("nan"))], dtype=bfloat16) b = np.array( [bfloat16(1.3359375), bfloat16(1.4375), bfloat16(float("nan"))], dtype=bfloat16) numpy_assert_allclose( a, b, rtol=0.1, atol=0.1, equal_nan=True, err_msg="", verbose=True) def testSort(self): values_to_sort = np.float32(FLOAT_VALUES) sorted_f32 = np.sort(values_to_sort) sorted_bf16 = np.sort(values_to_sort.astype(bfloat16)) np.testing.assert_equal(sorted_f32, np.float32(sorted_bf16)) BinaryOp = collections.namedtuple("BinaryOp", ["op"]) UNARY_UFUNCS = [ np.negative, np.positive, np.absolute, np.fabs, np.rint, np.sign, np.conjugate, np.exp, np.exp2, np.expm1, np.log, np.log10, np.log1p, np.log2, np.sqrt, np.square, np.cbrt, np.reciprocal, np.sin, np.cos, np.tan, np.arcsin, np.arccos, np.arctan, np.sinh, np.cosh, np.tanh, np.arcsinh, np.arccosh, np.arctanh, np.deg2rad, np.rad2deg, np.floor, np.ceil, np.trunc ] BINARY_UFUNCS = [ np.add, np.subtract, np.multiply, np.divide, np.logaddexp, np.logaddexp2, np.floor_divide, np.power, np.remainder, np.fmod, np.heaviside, np.arctan2, np.hypot, np.maximum, np.minimum, np.fmax, np.fmin, np.copysign ] BINARY_PREDICATE_UFUNCS = [ np.equal, np.not_equal, np.less, np.greater, np.less_equal, np.greater_equal, np.logical_and, np.logical_or, np.logical_xor ] class Bfloat16NumPyTest(parameterized.TestCase): """Tests the NumPy integration of the bfloat16 type.""" def testDtype(self): self.assertEqual(bfloat16, np.dtype(bfloat16)) def testDeepCopyDoesNotAlterHash(self): # For context, see https://github.com/google/jax/issues/4651. If the hash # value of the type descriptor is not initialized correctly, a deep copy # can change the type hash. 
dtype = np.dtype(bfloat16) h = hash(dtype) _ = copy.deepcopy(dtype) self.assertEqual(h, hash(dtype)) def testArray(self): x = np.array([[1, 2, 3]], dtype=bfloat16) self.assertEqual(bfloat16, x.dtype) self.assertEqual("[[1 2 3]]", str(x)) np.testing.assert_equal(x, x) numpy_assert_allclose(x, x) self.assertTrue((x == x).all()) def testComparisons(self): x = np.array([401408, 7, -32], dtype=np.float32) bx = x.astype(bfloat16) y = np.array([82432, 7, 0], dtype=np.float32) by = y.astype(bfloat16) np.testing.assert_equal(x == y, bx == by) np.testing.assert_equal(x != y, bx != by) np.testing.assert_equal(x < y, bx < by) np.testing.assert_equal(x > y, bx > by) np.testing.assert_equal(x <= y, bx <= by) np.testing.assert_equal(x >= y, bx >= by) def testEqual2(self): a = np.array([401408], bfloat16) b = np.array([82432], bfloat16) self.assertFalse(a.__eq__(b)) def testCasts(self): for dtype in [ np.float16, np.float32, np.float64, np.int8, np.int16, np.int32, np.int64, np.complex64, np.complex128, np.uint8, np.uint16, np.uint32, np.uint64, np.intc, np.int_, np.longlong, np.uintc, np.ulonglong ]: x = np.array([[1, 2, 3]], dtype=dtype) y = x.astype(bfloat16) z = y.astype(dtype) self.assertTrue(np.all(x == y)) self.assertEqual(bfloat16, y.dtype) self.assertTrue(np.all(x == z)) self.assertEqual(dtype, z.dtype) def testConformNumpyComplex(self): for dtype in [np.complex64, np.complex128]: x = np.array([1.1, 2.2 + 2.2j, 3.3], dtype=dtype) y_np = x.astype(np.float32) y_tf = x.astype(bfloat16) numpy_assert_allclose(y_np, y_tf, atol=2e-2) z_np = y_np.astype(dtype) z_tf = y_tf.astype(dtype) numpy_assert_allclose(z_np, z_tf, atol=2e-2) def testArange(self): np.testing.assert_equal( np.arange(100, dtype=np.float32).astype(bfloat16), np.arange(100, dtype=bfloat16)) np.testing.assert_equal( np.arange(-10.5, 7.8, 0.5, dtype=np.float32).astype(bfloat16), np.arange(-10.5, 7.8, 0.5, dtype=bfloat16)) np.testing.assert_equal( np.arange(-0., -7., -0.25, dtype=np.float32).astype(bfloat16), np.arange(-0., -7., -0.25, dtype=bfloat16)) np.testing.assert_equal( np.arange(-16384., 16384., 64., dtype=np.float32).astype(bfloat16), np.arange(-16384., 16384., 64., dtype=bfloat16)) # pylint: disable=g-complex-comprehension @parameterized.named_parameters(({ "testcase_name": "_" + op.__name__, "op": op } for op in UNARY_UFUNCS)) def testUnaryUfunc(self, op): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7, 10).astype(bfloat16) numpy_assert_allclose( op(x).astype(np.float32), op(x.astype(np.float32)), rtol=1e-2) @parameterized.named_parameters(({ "testcase_name": "_" + op.__name__, "op": op } for op in BINARY_UFUNCS)) def testBinaryUfunc(self, op): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7, 10).astype(bfloat16) y = rng.randn(4, 1, 7, 10).astype(bfloat16) numpy_assert_allclose( op(x, y).astype(np.float32), op(x.astype(np.float32), y.astype(np.float32)), rtol=1e-2) @parameterized.named_parameters(({ "testcase_name": "_" + op.__name__, "op": op } for op in BINARY_PREDICATE_UFUNCS)) def testBinaryPredicateUfunc(self, op): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7).astype(bfloat16) y = rng.randn(4, 1, 7).astype(bfloat16) np.testing.assert_equal( op(x, y), op(x.astype(np.float32), y.astype(np.float32))) @parameterized.named_parameters(({ "testcase_name": "_" + op.__name__, "op": op } for op in [np.isfinite, np.isinf, np.isnan, np.signbit, np.logical_not])) def testPredicateUfunc(self, op): rng = np.random.RandomState(seed=42) shape = (3, 7, 10) posinf_flips = rng.rand(*shape) < 0.1 neginf_flips = 
rng.rand(*shape) < 0.1 nan_flips = rng.rand(*shape) < 0.1 vals = rng.randn(*shape) vals = np.where(posinf_flips, np.inf, vals) vals = np.where(neginf_flips, -np.inf, vals) vals = np.where(nan_flips, np.nan, vals) vals = vals.astype(bfloat16) np.testing.assert_equal(op(vals), op(vals.astype(np.float32))) def testDivmod(self): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7).astype(bfloat16) y = rng.randn(4, 1, 7).astype(bfloat16) o1, o2 = np.divmod(x, y) e1, e2 = np.divmod(x.astype(np.float32), y.astype(np.float32)) numpy_assert_allclose(o1, e1, rtol=1e-2) numpy_assert_allclose(o2, e2, rtol=1e-2) def testModf(self): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7).astype(bfloat16) o1, o2 = np.modf(x) e1, e2 = np.modf(x.astype(np.float32)) numpy_assert_allclose(o1.astype(np.float32), e1, rtol=1e-2) numpy_assert_allclose(o2.astype(np.float32), e2, rtol=1e-2) def testLdexp(self): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7).astype(bfloat16) y = rng.randint(-50, 50, (1, 7)) numpy_assert_allclose( np.ldexp(x, y).astype(np.float32), np.ldexp(x.astype(np.float32), y), rtol=1e-2, atol=1e-6) def testFrexp(self): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7).astype(bfloat16) mant1, exp1 = np.frexp(x) mant2, exp2 = np.frexp(x.astype(np.float32)) np.testing.assert_equal(exp1, exp2) numpy_assert_allclose(mant1, mant2, rtol=1e-2) def testNextAfter(self): one = np.array(1., dtype=bfloat16) two = np.array(2., dtype=bfloat16) zero = np.array(0., dtype=bfloat16) nan = np.array(np.nan, dtype=bfloat16) np.testing.assert_equal(np.nextafter(one, two) - one, epsilon) np.testing.assert_equal(np.nextafter(one, zero) - one, -epsilon / 2) np.testing.assert_equal(np.isnan(np.nextafter(nan, one)), True) np.testing.assert_equal(np.isnan(np.nextafter(one, nan)), True) np.testing.assert_equal(np.nextafter(one, one), one) smallest_denormal = float.fromhex("1.0p-133") np.testing.assert_equal(np.nextafter(zero, one), smallest_denormal) np.testing.assert_equal(np.nextafter(zero, -one), -smallest_denormal) for a, b in itertools.permutations([0., -0., nan], 2): np.testing.assert_equal( np.nextafter( np.array(a, dtype=np.float32), np.array(b, dtype=np.float32)), np.nextafter( np.array(a, dtype=bfloat16), np.array(b, dtype=bfloat16))) if __name__ == "__main__": absltest.main()<|fim▁end|>
if dtype != bfloat16:
<|file_name|>Asset.py<|end_file_name|><|fim▁begin|>''' Created on 11May,2016 @author: linyufeng ''' from utils.TimeZoneConverter import TimeZoneConverter class Asset(object): ''' contain the values will be insert into table Asset ''' convert = TimeZoneConverter(); def __init__(self, startTime, endTime, directory, fileName, fileType, duration, sequence): self.startTime = self.convert.victoriaToUCT(startTime) self.endTime = self.convert.victoriaToUCT(endTime) self.directory = directory self.fileName = fileName self.fileType = fileType self.duration = int(duration) self.sequence = int(sequence) def getStartTime(self): return self.startTime def getEndTime(self): return self.endTime def getDirectory(self): return self.directory def getFileName(self): return self.fileName def getFileType(self): return self.fileType def getDuration(self): return self.duration def getSequence(self): return self.sequence def __eq__(self,other): if isinstance(other, self.__class__): if self.startTime == other.startTime: if self.endTime == other.endTime: if self.directory == other.directory: if self.duration == other.duration: if self.fileName == other.fileName:<|fim▁hole|><|fim▁end|>
if self.fileType == other.fileType: return True return False
<|file_name|>development.js<|end_file_name|><|fim▁begin|>import browserSync from 'browser-sync'; import config from '../../config'; import middlewaresStack from '../middlewares_stack'; import apiMiddleware from '../middlewares/api'; import mockMiddleware from '../middlewares/mock'; export default () => { const port = process.env.PORT; const middlewares = apiMiddleware() || mockMiddleware(); const server = browserSync.create(); server.init({ port, open: false, notify: false, server: { baseDir: config.distDir, middleware(req, res, next) { middlewaresStack(middlewares, req, res, next); } }, files: [ `${config.distDir}/**/*`,<|fim▁hole|><|fim▁end|>
`!${config.distDir}/**/*.map` ] }); };
<|file_name|>PlusInfinity.java<|end_file_name|><|fim▁begin|>/* Copyright 2008, 2009, 2010 by the Oxford University Computing Laboratory This file is part of HermiT. HermiT is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. HermiT is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with HermiT. If not, see <http://www.gnu.org/licenses/>. */ package org.semanticweb.HermiT.datatypes.owlreal; public final class PlusInfinity extends Number { private static final long serialVersionUID = -205551124673073593L; public static final PlusInfinity INSTANCE = new PlusInfinity(); private PlusInfinity() { } public boolean equals(Object that) { return this == that; } public String toString() { return "+INF"; } public double doubleValue() {<|fim▁hole|> throw new UnsupportedOperationException(); } public int intValue() { throw new UnsupportedOperationException(); } public long longValue() { throw new UnsupportedOperationException(); } protected Object readResolve() { return INSTANCE; } }<|fim▁end|>
throw new UnsupportedOperationException(); } public float floatValue() {
<|file_name|>command_line.py<|end_file_name|><|fim▁begin|>''' command_line.py Utility functions for reading command line arguments. Author: Martin Norbury Novemeber 2013 ''' import inspect import argparse def command_line(fn): ''' A decorator for functions intented to be run from the command line. This decorator introspects the method signature of the wrapped function to configures and parses command line arguments. Positional arguments translate to required command line arguments. Arguments with defaults supplied are assumed to be optional e.g. def myfunction(a,b=1): ... Can be called from the command line as:- > myfunction <a> [--b=value] All arguments are assumed to be strings at this point. ''' def wrapper_fn(*args, **kwargs): # Get the original function's method signature arguments, varargs, kwargs, defaults = inspect.getargspec(fn) # Get required and optional arguments required_length = -len(defaults) if defaults else len(arguments) required_arguments = arguments[:required_length] optional_arguments = arguments[required_length:] # Create a list of optional arguments of the form (name, value) optional_arguments_with_defaults = [] if optional_arguments: optional_arguments_with_defaults = zip(optional_arguments, defaults) # Create a command line parser parser = argparse.ArgumentParser() # Configure required arguments for argument in required_arguments: parser.add_argument('{0}'.format(argument))<|fim▁hole|> for argument, default in optional_arguments_with_defaults: parser.add_argument('--{0}'.format(argument), type=type(default), default=default) # Parse the command line arguments args = parser.parse_args() # Call the original function with command line supplied arguments result = fn(**dict(args._get_kwargs())) return result return wrapper_fn<|fim▁end|>
# Configure optional arguments, setting defaults appropriately.
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>__all__ = ["Transition"] class Transition(object): def __init__(self, startState, nextState, word, suffix, marked): self.startState = startState self.nextState = nextState self.word = word self.suffix = suffix self.marked = False def similarTransitions(self, transitions): for transition in transitions: if (self.startState == transition.startState and <|fim▁hole|> yield transition<|fim▁end|>
self.nextState == transition.nextState):
<|file_name|>test_skip_examples.py<|end_file_name|><|fim▁begin|># vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder import test class ExampleSkipTestCase(test.TestCase): test_counter = 0 @test.skip_test("Example usage of @test.skip_test()") def test_skip_test_example(self): self.fail("skip_test failed to work properly.") @test.skip_if(True, "Example usage of @test.skip_if()") def test_skip_if_example(self): self.fail("skip_if failed to work properly.") @test.skip_unless(False, "Example usage of @test.skip_unless()") def test_skip_unless_example(self): self.fail("skip_unless failed to work properly.") @test.skip_if(False, "This test case should never be skipped.") def test_001_increase_test_counter(self): ExampleSkipTestCase.test_counter += 1 @test.skip_unless(True, "This test case should never be skipped.") def test_002_increase_test_counter(self): ExampleSkipTestCase.test_counter += 1 <|fim▁hole|> def test_003_verify_test_counter(self): self.assertEquals(ExampleSkipTestCase.test_counter, 2, "Tests were not skipped appropriately")<|fim▁end|>
<|file_name|>session.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # # Copyright (C) 2004-2009 Edgewall Software # Copyright (C) 2004 Daniel Lundin <[email protected]> # Copyright (C) 2004-2006 Christopher Lenz <[email protected]> # Copyright (C) 2006 Jonas Borgström <[email protected]> # Copyright (C) 2008 Matt Good <[email protected]> # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://trac.edgewall.org/wiki/TracLicense. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://trac.edgewall.org/log/. # # Author: Daniel Lundin <[email protected]> # Christopher Lenz <[email protected]> import time from trac.core import TracError from trac.db.util import with_transaction from trac.util import hex_entropy from trac.util.html import Markup UPDATE_INTERVAL = 3600 * 24 # Update session last_visit time stamp after 1 day PURGE_AGE = 3600 * 24 * 90 # Purge session after 90 days idle COOKIE_KEY = 'trac_session' class DetachedSession(dict): def __init__(self, env, sid): dict.__init__(self) self.env = env self.sid = None self.last_visit = 0 self._new = True self._old = {} if sid: self.get_session(sid, authenticated=True) else: self.authenticated = False def __setitem__(self, key, value): dict.__setitem__(self, key, unicode(value)) def get_session(self, sid, authenticated=False): self.env.log.debug('Retrieving session for ID %r', sid) db = self.env.get_db_cnx() cursor = db.cursor() self.sid = sid self.authenticated = authenticated cursor.execute("SELECT last_visit FROM session " "WHERE sid=%s AND authenticated=%s", (sid, int(authenticated))) row = cursor.fetchone() if not row: return self._new = False self.last_visit = int(row[0] or 0) cursor.execute("SELECT name,value FROM session_attribute " "WHERE sid=%s and authenticated=%s", (sid, int(authenticated))) for name, value in cursor: self[name] = value self._old.update(self) def save(self): if not self._old and not self.items(): # The session doesn't have associated data, so there's no need to # persist it return authenticated = int(self.authenticated) now = int(time.time()) @with_transaction(self.env) def delete_session_cookie(db): cursor = db.cursor() if self._new: self.last_visit = now self._new = False # The session might already exist even if _new is True since # it could have been created by a concurrent request (#3563). try: cursor.execute("INSERT INTO session " " (sid,last_visit,authenticated)" " VALUES (%s,%s,%s)", (self.sid, self.last_visit, authenticated)) except Exception: db.rollback() self.env.log.warning('Session %s already exists', self.sid) if self._old != self: attrs = [(self.sid, authenticated, k, v) for k, v in self.items()] cursor.execute("DELETE FROM session_attribute WHERE sid=%s", (self.sid,)) self._old = dict(self.items()) if attrs: # The session variables might already have been updated # by a concurrent request. 
try: cursor.executemany("INSERT INTO session_attribute " " (sid,authenticated,name,value) " " VALUES (%s,%s,%s,%s)", attrs) except Exception: db.rollback() self.env.log.warning('Attributes for session %s ' 'already updated', self.sid) elif not authenticated: # No need to keep around empty unauthenticated sessions cursor.execute("DELETE FROM session " "WHERE sid=%s AND authenticated=0", (self.sid,)) return # Update the session last visit time if it is over an hour old, # so that session doesn't get purged if now - self.last_visit > UPDATE_INTERVAL: self.last_visit = now self.env.log.info("Refreshing session %s", self.sid) cursor.execute('UPDATE session SET last_visit=%s ' 'WHERE sid=%s AND authenticated=%s', (self.last_visit, self.sid, authenticated)) # Purge expired sessions. We do this only when the session was # changed as to minimize the purging. mintime = now - PURGE_AGE self.env.log.debug('Purging old, expired, sessions.') cursor.execute("DELETE FROM session_attribute " "WHERE authenticated=0 AND sid " "IN (SELECT sid FROM session WHERE " "authenticated=0 AND last_visit < %s)", (mintime,)) cursor.execute("DELETE FROM session WHERE " "authenticated=0 AND last_visit < %s", (mintime,)) class Session(DetachedSession): """Basic session handling and per-session storage.""" def __init__(self, env, req): super(Session, self).__init__(env, None) self.req = req if req.authname == 'anonymous': if not req.incookie.has_key(COOKIE_KEY): self.sid = hex_entropy(24) self.bake_cookie() else: sid = req.incookie[COOKIE_KEY].value self.get_session(sid) else: if req.incookie.has_key(COOKIE_KEY): sid = req.incookie[COOKIE_KEY].value self.promote_session(sid) self.get_session(req.authname, authenticated=True) def bake_cookie(self, expires=PURGE_AGE): assert self.sid, 'Session ID not set' self.req.outcookie[COOKIE_KEY] = self.sid self.req.outcookie[COOKIE_KEY]['path'] = self.req.base_path or '/' self.req.outcookie[COOKIE_KEY]['expires'] = expires if self.env.secure_cookies: self.req.outcookie[COOKIE_KEY]['secure'] = True def get_session(self, sid, authenticated=False): refresh_cookie = False if self.sid and sid != self.sid: refresh_cookie = True super(Session, self).get_session(sid, authenticated) if self.last_visit and time.time() - self.last_visit > UPDATE_INTERVAL: refresh_cookie = True # Refresh the session cookie if this is the first visit since over a day<|fim▁hole|> self.bake_cookie() def change_sid(self, new_sid): assert self.req.authname == 'anonymous', \ 'Cannot change ID of authenticated session' assert new_sid, 'Session ID cannot be empty' if new_sid == self.sid: return cursor = self.env.get_db_cnx().cursor() cursor.execute("SELECT sid FROM session WHERE sid=%s", (new_sid,)) if cursor.fetchone(): raise TracError(Markup('Session "%s" already exists.<br />' 'Please choose a different session ID.') % new_sid, 'Error renaming session') self.env.log.debug('Changing session ID %s to %s', self.sid, new_sid) @with_transaction(self.env) def update_session_id(db): cursor = db.cursor() cursor.execute("UPDATE session SET sid=%s WHERE sid=%s " "AND authenticated=0", (new_sid, self.sid)) cursor.execute("UPDATE session_attribute SET sid=%s " "WHERE sid=%s and authenticated=0", (new_sid, self.sid)) self.sid = new_sid self.bake_cookie() def promote_session(self, sid): """Promotes an anonymous session to an authenticated session, if there is no preexisting session data for that user name. 
""" assert self.req.authname != 'anonymous', \ 'Cannot promote session of anonymous user' @with_transaction(self.env) def update_session_id(db): cursor = db.cursor() cursor.execute("SELECT authenticated FROM session " "WHERE sid=%s OR sid=%s ", (sid, self.req.authname)) authenticated_flags = [row[0] for row in cursor.fetchall()] if len(authenticated_flags) == 2: # There's already an authenticated session for the user, # we simply delete the anonymous session cursor.execute("DELETE FROM session WHERE sid=%s " "AND authenticated=0", (sid,)) cursor.execute("DELETE FROM session_attribute WHERE sid=%s " "AND authenticated=0", (sid,)) elif len(authenticated_flags) == 1: if not authenticated_flags[0]: # Update the anomymous session records so the session ID # becomes the user name, and set the authenticated flag. self.env.log.debug('Promoting anonymous session %s to ' 'authenticated session for user %s', sid, self.req.authname) cursor.execute("UPDATE session SET sid=%s,authenticated=1 " "WHERE sid=%s AND authenticated=0", (self.req.authname, sid)) cursor.execute("UPDATE session_attribute " "SET sid=%s,authenticated=1 WHERE sid=%s", (self.req.authname, sid)) else: # we didn't have an anonymous session for this sid cursor.execute("INSERT INTO session " "(sid,last_visit,authenticated)" " VALUES(%s,%s,1)", (self.req.authname, int(time.time()))) self._new = False self.sid = sid self.bake_cookie(0) # expire the cookie<|fim▁end|>
if not authenticated and refresh_cookie:
<|file_name|>smartLegend.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals import markdown from markdown.treeprocessors import Treeprocessor from markdown.blockprocessors import BlockProcessor import re from markdown import util import xml.etree.ElementTree as ET import copy from markdown.inlinepatterns import IMAGE_LINK_RE class InFigureParser(object): def transform(self, parent, element, legend, index, InP = False): if InP: lelems = list(element.iter()) oldImg = lelems[-1] element.remove(oldImg) else: oldImg = element nFig = util.etree.Element("figure") nFigCaption = util.etree.Element("figcaption") contentLegend = legend.items() for el in legend: legend.remove(el) nFigCaption.append(el) nFig.append(oldImg) nFig.append(nFigCaption) parent.remove(element) parent.remove(legend) parent.insert(index, nFig) class FigureParser(InFigureParser): def __init__(self, ignoringImg): InFigureParser.__init__(self) self.ignoringImg = ignoringImg self.ree = re.compile(r"^" + IMAGE_LINK_RE + r"(\n|$)") def detect(self, element, type): if element == None: return False lelems = list(element.iter()) #print repr(element.text) return (type == "unknown" or type == "Figure") \ and element.tag=="p" \ and( ( element.text is not None \ and self.ree.search(element.text)) \ or ( (element.text is None or element.text.strip() == "") \ and (len(lelems) == 1 or (len(lelems)==2 and lelems[0] is element)) \ and lelems[-1].tag == "img" \ and (lelems[-1].attrib["src"] not in self.ignoringImg))) def transform(self, parent, element, legend, index): InFigureParser.transform(self, parent, element, legend, index, True) class EquationParser(InFigureParser): def detect(self, element, type): if element == None: return False lelems = list(element.iter()) return (type == "unknown" or type == "Equation") \ and element.tag=="p" \ and (element.text is None or element.text.strip() == "") \ and (len(lelems) == 1 or (len(lelems)==2 and lelems[0] is element)) \ and lelems[-1].tag == "mathjax" def transform(self, parent, element, legend, index): InFigureParser.transform(self, parent, element, legend, index, True) class CodeParser(InFigureParser): def __init__(self, md): self.md = md def detect(self, element, type): if element == None: return False if (type == "unknown" or type == "Code") and element.tag=="p" : hs = self.md.htmlStash for i in range(hs.html_counter): if element.text == hs.get_placeholder(i) : Teste = ET.fromstring(hs.rawHtmlBlocks[i][0].encode('utf-8')) if Teste is not None and Teste.tag=="table" and "class" in Teste.attrib and Teste.attrib["class"] == "codehilitetable":<|fim▁hole|> return False class QuoteParser(InFigureParser): def detect(self, element, type): if element == None: return False return (type == "unknown" or type == "Source") and element.tag=="blockquote" class TableParser(object): def detect(self, element, type): if element == None: return False return (type == "unknown" or type == "Table") and element.tag=="table" def transform(self, parent, element, legend, index): parent.remove(legend) cap = util.etree.Element('caption') contentLegend = legend.items() for el in legend: legend.remove(el) cap.append(el) element.insert(0, cap) class VideoParser(InFigureParser): def detect(self, element, type): if element == None: return False lelems = list(element.iter()) return (type == "unknown" or type == "Video") \ and element.tag=="iframe" class SmartLegendProcessor(Treeprocessor): def __init__(self, parser, configs, md): Treeprocessor.__init__(self, 
parser) self.configs = configs self.processors = ( FigureParser(configs["IGNORING_IMG"]), EquationParser(), CodeParser(md), TableParser(), VideoParser(), QuoteParser()) def run(self, root): root = self.parse_legend(root) root = self.parse_autoimg(root) return root def parse_legend(self, root): elemsToInspect = [root] while len(elemsToInspect) > 0: elem = elemsToInspect.pop() Restart=True while Restart: Restart = False precedent = None i=0 for nelem in elem: if nelem.tag in self.configs["PARENTS"] and nelem not in elemsToInspect: elemsToInspect.append(nelem) if nelem.tag == "customlegend" and precedent is not None : # and len(list(nelem.itertext())) == 0 : proc = self.detectElement(precedent, nelem.attrib["type"]) if proc is not None: proc.transform(elem, precedent, nelem, i-1) Restart = True break precedent = nelem i+=1 return root def parse_autoimg(self, root): elemsToInspect = [root] while len(elemsToInspect) > 0: elem = elemsToInspect.pop() Restart=True while Restart: Restart = False i=0 for nelem in elem: if nelem.tag in self.configs["PARENTS"] and nelem not in elemsToInspect: elemsToInspect.append(nelem) #Auto Legend for image if nelem.tag == 'p' and len(list(nelem.itertext())) == 0 : lelems = list(nelem.iter()) if (len(lelems) == 1 or (len(lelems)==2 and lelems[0] is nelem)) \ and lelems[-1].tag == "img" \ and lelems[-1].attrib["alt"] != "" \ and not (lelems[-1].attrib["src"] in self.configs["IGNORING_IMG"]): oldImg = lelems[-1] nelem.remove(oldImg) nFig = util.etree.Element("figure") nFigCaption = util.etree.Element("figcaption") nFigCaption.text = oldImg.attrib["alt"] oldImg.attrib["alt"]="" nFig.append(oldImg) nFig.append(nFigCaption) nelem.insert(i-1, nFig) Restart = True break i+=1 return root def detectElement(self, elem, legend): for proc in self.processors: if proc.detect(elem, legend) : return proc return None class LegendProcessor(BlockProcessor): def __init__(self, parser, md, configs): BlockProcessor.__init__(self, parser) self.md = md self.configs = configs self.processors = ( FigureParser(configs["IGNORING_IMG"]), EquationParser(), CodeParser(md), TableParser(), VideoParser(), QuoteParser()) self.RE = re.compile(r'(^|(?<=\n))((?P<typelegend>Figure|Table|Code|Equation|Video|Source)\s?)*\:\s?(?P<txtlegend>.*?)(\n|$)') def detectElement(self, elem, legend): for proc in self.processors: if proc.detect(elem, legend) : return proc return None def test(self, parent, block): mLeg = self.RE.search(block) if not bool(mLeg): return False else: return True def test_complete(self, parent, block): mLeg = self.RE.search(block) gd = mLeg.groupdict() if gd["typelegend"] is None: type = "unknown" else: type = gd["typelegend"] sibling = self.lastChild(parent) return self.detectElement(sibling, type) is not None def run(self, parent, blocks): block = blocks.pop(0) mLeg = self.RE.search(block) before = block[:mLeg.start()] after = block[mLeg.end():] contentStart = block[mLeg.start():mLeg.start("txtlegend")] cpp = None if before: cpp = copy.copy(parent) self.parser.parseBlocks(cpp, [before]) else: cpp = parent if not self.test_complete(cpp, block): blocks.insert(0, block) return False elif before: self.parser.parseBlocks(parent, [before]) nLegend = util.etree.Element("customlegend") self.parser.parseChunk(nLegend, mLeg.group('txtlegend')) gd = mLeg.groupdict() if gd["typelegend"] is None: nLegend.set("type", "unknown") else: nLegend.set("type", gd["typelegend"]) nLegend.set("rawStart", contentStart) parent.append(nLegend) if after: blocks.insert(0,after) class 
SmartLegendExtension(markdown.extensions.Extension): def __init__(self, configs={}): self.configs = { "IGNORING_IMG" : [], "PARENTS" : [], } for key, value in configs.items(): self.configs[key] = value if "div" not in self.configs["PARENTS"]: self.configs["PARENTS"].append("div") pass def extendMarkdown(self, md, md_globals): md.registerExtension(self) md.treeprocessors.add('smart-legend', SmartLegendProcessor(md.parser,self.configs, md),"_end") md.parser.blockprocessors.add('legend-processor', LegendProcessor(md.parser,md, self.configs),"_begin") def makeExtension(configs={}): return SmartImgExtension(configs=configs)<|fim▁end|>
                        return True
            else:
                return False
<|file_name|>qa_packed_to_unpacked.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # # Copyright 2005,2007,2010,2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr, gr_unittest import blocks_swig as blocks import random class test_packing(gr_unittest.TestCase): def setUp(self): self.tb = gr.top_block () def tearDown(self): self.tb = None def test_001(self): src_data = (0x80,) expected_results = (1,0,0,0,0,0,0,0) src = gr.vector_source_b(src_data, False) op = blocks.packed_to_unpacked_bb(1, gr.GR_MSB_FIRST) dst = gr.vector_sink_b() self.tb.connect(src, op) self.tb.connect(op, dst) self.tb.run() self.assertEqual(expected_results, dst.data()) def test_002(self): src_data = (0x80,) expected_results = (0,0,0,0,0,0,0,1) src = gr.vector_source_b(src_data, False) op = blocks.packed_to_unpacked_bb(1, gr.GR_LSB_FIRST) dst = gr.vector_sink_b() self.tb.connect(src, op) self.tb.connect(op, dst) self.tb.run() self.assertEqual(expected_results, dst.data()) def test_003(self): src_data = (0x11,) expected_results = (4, 2) src = gr.vector_source_b(src_data, False) op = blocks.packed_to_unpacked_bb(3, gr.GR_LSB_FIRST) dst = gr.vector_sink_b() self.tb.connect(src, op) self.tb.connect(op, dst) self.tb.run() self.assertEqual(expected_results, dst.data()) def test_004(self): src_data = (0x11,) expected_results = (0, 4) src = gr.vector_source_b(src_data, False) op = blocks.packed_to_unpacked_bb(3, gr.GR_MSB_FIRST) dst = gr.vector_sink_b() self.tb.connect(src, op) self.tb.connect(op, dst) self.tb.run() self.assertEqual(expected_results, dst.data()) def test_005(self): src_data = (1,0,0,0,0,0,1,0,0,1,0,1,1,0,1,0) expected_results = (0x82, 0x5a) src = gr.vector_source_b(src_data, False) op = blocks.unpacked_to_packed_bb(1, gr.GR_MSB_FIRST) dst = gr.vector_sink_b() self.tb.connect(src, op) self.tb.connect(op, dst) self.tb.run() self.assertEqual(expected_results, dst.data()) def test_006(self): src_data = (0,1,0,0,0,0,0,1,0,1,0,1,1,0,1,0) expected_results = (0x82, 0x5a) src = gr.vector_source_b(src_data, False) op = blocks.unpacked_to_packed_bb(1, gr.GR_LSB_FIRST) dst = gr.vector_sink_b() self.tb.connect(src, op) self.tb.connect(op, dst) self.tb.run() self.assertEqual(expected_results, dst.data()) def test_007(self): src_data = (4, 2, 0,0,0) expected_results = (0x11,) src = gr.vector_source_b(src_data, False) op = blocks.unpacked_to_packed_bb(3, gr.GR_LSB_FIRST) dst = gr.vector_sink_b() self.tb.connect(src, op) self.tb.connect(op, dst) self.tb.run() self.assertEqual(expected_results, dst.data()) def test_008(self): src_data = (0, 4, 2,0,0) expected_results = (0x11,) src = gr.vector_source_b(src_data,False) op = blocks.unpacked_to_packed_bb(3, gr.GR_MSB_FIRST) dst = gr.vector_sink_b() self.tb.connect(src, op) self.tb.connect(op, dst) self.tb.run() self.assertEqual(expected_results, 
dst.data()) def test_009(self): random.seed(0) src_data = [] for i in xrange(202): src_data.append((random.randint(0,255))) src_data = tuple(src_data) expected_results = src_data src = gr.vector_source_b(tuple(src_data), False) op1 = blocks.packed_to_unpacked_bb(3, gr.GR_MSB_FIRST) op2 = blocks.unpacked_to_packed_bb(3, gr.GR_MSB_FIRST) dst = gr.vector_sink_b() self.tb.connect(src, op1, op2) self.tb.connect(op2, dst) self.tb.run() self.assertEqual(expected_results[0:201], dst.data()) def test_010(self): random.seed(0) src_data = [] for i in xrange(56): src_data.append((random.randint(0,255))) src_data = tuple(src_data) expected_results = src_data src = gr.vector_source_b(tuple(src_data), False) op1 = blocks.packed_to_unpacked_bb(7, gr.GR_MSB_FIRST) op2 = blocks.unpacked_to_packed_bb(7, gr.GR_MSB_FIRST) dst = gr.vector_sink_b() self.tb.connect(src, op1, op2) self.tb.connect(op2, dst) self.tb.run() self.assertEqual(expected_results[0:201], dst.data()) def test_011(self): random.seed(0) src_data = [] for i in xrange(56): src_data.append((random.randint(0,255))) src_data = tuple(src_data) expected_results = src_data src = gr.vector_source_b(tuple(src_data),False) op1 = blocks.packed_to_unpacked_bb(7, gr.GR_LSB_FIRST) op2 = blocks.unpacked_to_packed_bb(7, gr.GR_LSB_FIRST) dst = gr.vector_sink_b() self.tb.connect(src, op1, op2) self.tb.connect(op2, dst) self.tb.run() self.assertEqual(expected_results[0:201], dst.data()) # tests on shorts def test_100a(self): random.seed(0) src_data = [] for i in xrange(100): src_data.append((random.randint(-2**15,2**15-1))) src_data = tuple(src_data) expected_results = src_data src = gr.vector_source_s(tuple(src_data), False) op1 = blocks.packed_to_unpacked_ss(1, gr.GR_MSB_FIRST) op2 = blocks.unpacked_to_packed_ss(1, gr.GR_MSB_FIRST) dst = gr.vector_sink_s() self.tb.connect(src, op1, op2) self.tb.connect(op2, dst) self.tb.run() self.assertEqual(expected_results, dst.data()) def test_100b(self): random.seed(0) src_data = [] for i in xrange(100): src_data.append((random.randint(-2**15,2**15-1))) src_data = tuple(src_data) expected_results = src_data src = gr.vector_source_s(tuple(src_data), False) op1 = blocks.packed_to_unpacked_ss(1, gr.GR_LSB_FIRST) op2 = blocks.unpacked_to_packed_ss(1, gr.GR_LSB_FIRST)<|fim▁hole|> dst = gr.vector_sink_s() self.tb.connect(src, op1, op2) self.tb.connect(op2, dst) self.tb.run() self.assertEqual(expected_results, dst.data()) def test_101a(self): random.seed(0) src_data = [] for i in xrange(100): src_data.append((random.randint(-2**15,2**15-1))) src_data = tuple(src_data) expected_results = src_data src = gr.vector_source_s(tuple(src_data), False) op1 = blocks.packed_to_unpacked_ss(8, gr.GR_MSB_FIRST) op2 = blocks.unpacked_to_packed_ss(8, gr.GR_MSB_FIRST) dst = gr.vector_sink_s() self.tb.connect(src, op1, op2) self.tb.connect(op2, dst) self.tb.run() self.assertEqual(expected_results, dst.data()) def test_101b(self): random.seed(0) src_data = [] for i in xrange(100): src_data.append((random.randint(-2**15,2**15-1))) src_data = tuple(src_data) expected_results = src_data src = gr.vector_source_s(tuple(src_data), False) op1 = blocks.packed_to_unpacked_ss(8, gr.GR_LSB_FIRST) op2 = blocks.unpacked_to_packed_ss(8, gr.GR_LSB_FIRST) dst = gr.vector_sink_s() self.tb.connect(src, op1, op2) self.tb.connect(op2, dst) self.tb.run() self.assertEqual(expected_results, dst.data()) # tests on ints def test_200a(self): random.seed(0) src_data = [] for i in xrange(100): src_data.append((random.randint(-2**31,2**31-1))) src_data = tuple(src_data) 
expected_results = src_data src = gr.vector_source_i(tuple(src_data), False) op1 = blocks.packed_to_unpacked_ii(1, gr.GR_MSB_FIRST) op2 = blocks.unpacked_to_packed_ii(1, gr.GR_MSB_FIRST) dst = gr.vector_sink_i() self.tb.connect(src, op1, op2) self.tb.connect(op2, dst) self.tb.run() self.assertEqual(expected_results, dst.data()) def test_200b(self): random.seed(0) src_data = [] for i in xrange(100): src_data.append((random.randint(-2**31,2**31-1))) src_data = tuple(src_data) expected_results = src_data src = gr.vector_source_i(tuple(src_data), False) op1 = blocks.packed_to_unpacked_ii(1, gr.GR_LSB_FIRST) op2 = blocks.unpacked_to_packed_ii(1, gr.GR_LSB_FIRST) dst = gr.vector_sink_i() self.tb.connect(src, op1, op2) self.tb.connect(op2, dst) self.tb.run() self.assertEqual(expected_results, dst.data()) def test_201a(self): random.seed(0) src_data = [] for i in xrange(100): src_data.append((random.randint(-2**31,2**31-1))) src_data = tuple(src_data) expected_results = src_data src = gr.vector_source_i(tuple(src_data), False) op1 = blocks.packed_to_unpacked_ii(8, gr.GR_MSB_FIRST) op2 = blocks.unpacked_to_packed_ii(8, gr.GR_MSB_FIRST) dst = gr.vector_sink_i() self.tb.connect(src, op1, op2) self.tb.connect(op2, dst) self.tb.run() self.assertEqual(expected_results, dst.data()) def test_201b(self): random.seed(0) src_data = [] for i in xrange(100): src_data.append((random.randint(-2**31,2**31-1))) src_data = tuple(src_data) expected_results = src_data src = gr.vector_source_i(tuple(src_data), False) op1 = blocks.packed_to_unpacked_ii(8, gr.GR_LSB_FIRST) op2 = blocks.unpacked_to_packed_ii(8, gr.GR_LSB_FIRST) dst = gr.vector_sink_i() self.tb.connect(src, op1, op2) self.tb.connect(op2, dst) self.tb.run() self.assertEqual(expected_results, dst.data()) if __name__ == '__main__': gr_unittest.run(test_packing, "test_packing.xml")<|fim▁end|>
<|file_name|>namespaced-enum-glob-import-no-impls.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. mod m2 { pub enum Foo { A, B(isize), C { a: isize }, } impl Foo { pub fn foo() {} pub fn bar(&self) {} } }<|fim▁hole|> mod m { pub use m2::Foo::*; } pub fn main() { use m2::Foo::*; foo(); //~ ERROR unresolved name `foo` m::foo(); //~ ERROR unresolved name `m::foo` bar(); //~ ERROR unresolved name `bar` m::bar(); //~ ERROR unresolved name `m::bar` }<|fim▁end|>
<|file_name|>violet_hold.cpp<|end_file_name|><|fim▁begin|>/* * Copyright (C) 2011-2012 ArkCORE2 <http://www.arkania.net/> * Copyright (C) 2010-2012 Project SkyFire <http://www.projectskyfire.org/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "ScriptPCH.h" #include "ScriptedEscortAI.h" #include "violet_hold.h" #define GOSSIP_START_EVENT "Get your people to safety, we'll keep the Blue Dragonflight's forces at bay." #define GOSSIP_ITEM_1 "Activate the crystals when we get in trouble, right" #define GOSSIP_I_WANT_IN "I'm not fighting, so send me in now!" #define SPAWN_TIME 20000 enum PortalCreatures { CREATURE_AZURE_INVADER_1 = 30661, CREATURE_AZURE_INVADER_2 = 30961, CREATURE_AZURE_SPELLBREAKER_1 = 30662, CREATURE_AZURE_SPELLBREAKER_2 = 30962, CREATURE_AZURE_BINDER_1 = 30663, CREATURE_AZURE_BINDER_2 = 30918, CREATURE_AZURE_MAGE_SLAYER_1 = 30664, CREATURE_AZURE_MAGE_SLAYER_2 = 30963, CREATURE_AZURE_CAPTAIN = 30666, CREATURE_AZURE_SORCEROR = 30667, CREATURE_AZURE_RAIDER = 30668, CREATURE_AZURE_STALKER = 32191 }; enum AzureInvaderSpells { SPELL_CLEAVE = 15496, SPELL_IMPALE = 58459, H_SPELL_IMPALE = 59256, SPELL_BRUTAL_STRIKE = 58460, SPELL_SUNDER_ARMOR = 58461 }; enum AzureSellbreakerSpells { SPELL_ARCANE_BLAST = 58462, H_SPELL_ARCANE_BLAST = 59257, SPELL_SLOW = 25603, SPELL_CHAINS_OF_ICE = 58464, SPELL_CONE_OF_COLD = 58463, H_SPELL_CONE_OF_COLD = 59258 }; enum AzureBinderSpells { SPELL_ARCANE_BARRAGE = 58456, H_SPELL_ARCANE_BARRAGE = 59248, SPELL_ARCANE_EXPLOSION = 58455, H_SPELL_ARCANE_EXPLOSION = 59245, SPELL_FROST_NOVA = 58458, H_SPELL_FROST_NOVA = 59253, SPELL_FROSTBOLT = 58457, H_SPELL_FROSTBOLT = 59251, }; enum AzureMageSlayerSpells { SPELL_ARCANE_EMPOWERMENT = 58469, SPELL_SPELL_LOCK = 30849 }; enum AzureCaptainSpells { SPELL_MORTAL_STRIKE = 32736, SPELL_WHIRLWIND_OF_STEEL = 41057 }; enum AzureSorcerorSpells { SPELL_ARCANE_STREAM = 60181, H_SPELL_ARCANE_STREAM = 60204, SPELL_MANA_DETONATION = 60182, H_SPELL_MANA_DETONATION = 60205 }; enum AzureRaiderSpells { SPELL_CONCUSSION_BLOW = 52719, SPELL_MAGIC_REFLECTION = 60158 }; enum AzureStalkerSpells { SPELL_BACKSTAB = 58471, SPELL_TACTICAL_BLINK = 58470 }; enum AzureSaboteurSpells { SABOTEUR_SHIELD_DISRUPTION = 58291, SABOTEUR_SHIELD_EFFECT = 45775 }; enum TrashDoorSpell { SPELL_DESTROY_DOOR_SEAL = 58040 }; enum Spells { SPELL_PORTAL_CHANNEL = 58012, SPELL_CRYSTALL_ACTIVATION = 57804 }; enum eSinclari { SAY_SINCLARI_1 = -1608045 }; float FirstPortalWPs [6][3] = { {1877.670288f, 842.280273f, 43.333591f}, {1877.338867f, 834.615356f, 38.762287f}, {1872.161011f, 823.854309f, 38.645401f}, {1864.860474f, 815.787170f, 38.784843f}, {1858.953735f, 810.048950f, 44.008759f}, {1843.707153f, 805.807739f, 44.135197f} //{1825.736084f, 807.305847f, 44.363785f} }; float SecondPortalFirstWPs [9][3] = { {1902.561401f, 853.334656f, 47.106117f}, {1895.486084f, 855.376404f, 44.334591f}, {1882.805176f, 854.993286f, 43.333591f}, {1877.670288f, 842.280273f, 43.333591f}, 
{1877.338867f, 834.615356f, 38.762287f}, {1872.161011f, 823.854309f, 38.645401f}, {1864.860474f, 815.787170f, 38.784843f}, {1858.953735f, 810.048950f, 44.008759f}, {1843.707153f, 805.807739f, 44.135197f} //{1825.736084f, 807.305847f, 44.363785f} }; float SecondPortalSecondWPs [8][3] = { {1929.392212f, 837.614990f, 47.136166f}, {1928.290649f, 824.750427f, 45.474411f}, {1915.544922f, 826.919373f, 38.642811f}, {1900.933960f, 818.855652f, 38.801647f}, {1886.810547f, 813.536621f, 38.490490f}, {1869.079712f, 808.701538f, 38.689003f}, {1860.843384f, 806.645020f, 44.008789f}, {1843.707153f, 805.807739f, 44.135197f} //{1825.736084f, 807.305847f, 44.363785f} }; float ThirdPortalWPs [8][3] = { {1934.049438f, 815.778503f, 52.408699f}, {1928.290649f, 824.750427f, 45.474411f}, {1915.544922f, 826.919373f, 38.642811f}, {1900.933960f, 818.855652f, 38.801647f}, {1886.810547f, 813.536621f, 38.490490f}, {1869.079712f, 808.701538f, 38.689003f}, {1860.843384f, 806.645020f, 44.008789f}, {1843.707153f, 805.807739f, 44.135197f} //{1825.736084f, 807.305847f, 44.363785f} }; float FourthPortalWPs [9][3] = { {1921.658447f, 761.657043f, 50.866741f}, {1910.559814f, 755.780457f, 47.701447f}, {1896.664673f, 752.920898f, 47.667004f}, {1887.398804f, 763.633240f, 47.666851f}, {1879.020386f, 775.396973f, 38.705990f}, {1872.439087f, 782.568604f, 38.808292f}, {1863.573364f, 791.173584f, 38.743660f}, {1857.811890f, 796.765564f, 43.950329f}, {1845.577759f, 800.681152f, 44.104248f} //{1827.100342f, 801.605957f, 44.363358f} }; float FifthPortalWPs [6][3] = { {1887.398804f, 763.633240f, 47.666851f}, {1879.020386f, 775.396973f, 38.705990f}, {1872.439087f, 782.568604f, 38.808292f}, {1863.573364f, 791.173584f, 38.743660f}, {1857.811890f, 796.765564f, 43.950329f}, {1845.577759f, 800.681152f, 44.104248f} //{1827.100342f, 801.605957f, 44.363358f} }; float SixthPoralWPs [4][3] = { {1888.861084f, 805.074768f, 38.375790f}, {1869.793823f, 804.135804f, 38.647018f}, {1861.541504f, 804.149780f, 43.968292f}, {1843.567017f, 804.288208f, 44.139091f} //{1826.889648f, 803.929993f, 44.363239f} }; const float SaboteurFinalPos1[3][3] = { {1892.502319f, 777.410767f, 38.630402f}, {1891.165161f, 762.969421f, 47.666920f}, {1893.168091f, 740.919189f, 47.666920f} }; const float SaboteurFinalPos2[3][3] = { {1882.242676f, 834.818726f, 38.646786f}, {1879.220825f, 842.224854f, 43.333641f}, {1873.842896f, 863.892456f, 43.333641f} }; const float SaboteurFinalPos3[2][3] = { {1904.298340f, 792.400391f, 38.646782f}, {1935.716919f, 758.437073f, 30.627895f} }; const float SaboteurFinalPos4[3] = { 1855.006104f, 760.641724f, 38.655266f }; const float SaboteurFinalPos5[3] = { 1906.667358f, 841.705566f, 38.637894f }; const float SaboteurFinalPos6[5][3] = { {1911.437012f, 821.289246f, 38.684128f}, {1920.734009f, 822.978027f, 41.525414f}, {1928.262939f, 830.836609f, 44.668266f}, {1929.338989f, 837.593933f, 47.137596f}, {1931.063354f, 848.468445f, 47.190434f} }; const Position MovePosition = {1806.955566f, 803.851807f, 44.363323f, 0.0f}; const Position playerTeleportPosition = {1830.531006f, 803.939758f, 44.340508f, 6.281611f}; const Position sinclariOutsidePosition = {1817.315674f, 804.060608f, 44.363998f, 0.0f}; class npc_sinclari_vh : public CreatureScript { public: npc_sinclari_vh() : CreatureScript("npc_sinclari_vh") { } bool OnGossipSelect(Player* player, Creature* creature, uint32 /*Sender*/, uint32 action) { player->PlayerTalkClass->ClearMenus(); switch (action) { case GOSSIP_ACTION_INFO_DEF+1: player->CLOSE_GOSSIP_MENU(); CAST_AI(npc_sinclari_vh::npc_sinclariAI, 
(creature->AI()))->uiPhase = 1; if (InstanceScript* instance = creature->GetInstanceScript()) instance->SetData(DATA_MAIN_EVENT_PHASE, SPECIAL); break; case GOSSIP_ACTION_INFO_DEF+2: player->SEND_GOSSIP_MENU(13854, creature->GetGUID()); break; case GOSSIP_ACTION_INFO_DEF+3: player->NearTeleportTo(playerTeleportPosition.GetPositionX(), playerTeleportPosition.GetPositionY(), playerTeleportPosition.GetPositionZ(), playerTeleportPosition.GetOrientation(), true); player->CLOSE_GOSSIP_MENU(); break; } return true; } bool OnGossipHello(Player* player, Creature* creature) { if (InstanceScript* instance = creature->GetInstanceScript()) { switch (instance->GetData(DATA_MAIN_EVENT_PHASE)) { case NOT_STARTED: case FAIL: // Allow to start event if not started or wiped player->ADD_GOSSIP_ITEM(GOSSIP_ICON_CHAT, GOSSIP_ITEM_1, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF+2); player->ADD_GOSSIP_ITEM(GOSSIP_ICON_CHAT, GOSSIP_START_EVENT, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF+1); player->SEND_GOSSIP_MENU(13853, creature->GetGUID()); break; case IN_PROGRESS: // Allow to teleport inside if event is in progress player->ADD_GOSSIP_ITEM(GOSSIP_ICON_CHAT, GOSSIP_I_WANT_IN, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF+3); player->SEND_GOSSIP_MENU(13853, creature->GetGUID()); break; default: player->SEND_GOSSIP_MENU(13910, creature->GetGUID()); } } return true; } CreatureAI* GetAI(Creature* creature) const { return new npc_sinclariAI(creature); } struct npc_sinclariAI : public ScriptedAI { npc_sinclariAI(Creature* creature) : ScriptedAI(creature) { instance = creature->GetInstanceScript(); } InstanceScript* instance; uint8 uiPhase; uint32 uiTimer; void Reset() { uiPhase = 0; uiTimer = 0; me->SetReactState(REACT_AGGRESSIVE); std::list<Creature*> GuardList; me->GetCreatureListWithEntryInGrid(GuardList, NPC_VIOLET_HOLD_GUARD, 40.0f); if (!GuardList.empty()) { for (std::list<Creature*>::const_iterator itr = GuardList.begin(); itr != GuardList.end(); ++itr) { if (Creature* pGuard = *itr) { pGuard->DisappearAndDie(); pGuard->Respawn(); pGuard->SetVisible(true); pGuard->SetReactState(REACT_AGGRESSIVE); } } } } void UpdateAI(const uint32 uiDiff) { ScriptedAI::UpdateAI(uiDiff); if (uiPhase) { if (uiTimer <= uiDiff) { switch (uiPhase) { case 1: DoScriptText(SAY_SINCLARI_1, me); uiTimer = 4000; uiPhase = 2; break; case 2: { std::list<Creature*> GuardList; me->GetCreatureListWithEntryInGrid(GuardList, NPC_VIOLET_HOLD_GUARD, 40.0f); if (!GuardList.empty()) for (std::list<Creature*>::const_iterator itr = GuardList.begin(); itr != GuardList.end(); ++itr) { if (Creature* pGuard = *itr) { pGuard->RemoveUnitMovementFlag(MOVEMENTFLAG_WALKING); pGuard->GetMotionMaster()->MovePoint(0, MovePosition); } } uiTimer = 6000; uiPhase = 3; break; } case 3: { std::list<Creature*> GuardList; me->GetCreatureListWithEntryInGrid(GuardList, NPC_VIOLET_HOLD_GUARD, 40.0f); if (!GuardList.empty()) for (std::list<Creature*>::const_iterator itr = GuardList.begin(); itr != GuardList.end(); ++itr) { if (Creature* pGuard = *itr) { pGuard->SetVisible(false); pGuard->SetReactState(REACT_PASSIVE); } } uiTimer = 2000; uiPhase = 4; break; } case 4: me->GetMotionMaster()->MovePoint(0, sinclariOutsidePosition); uiTimer = 4000; uiPhase = 5; break; case 5: if (instance) instance->SetData(DATA_MAIN_EVENT_PHASE, IN_PROGRESS); me->SetReactState(REACT_PASSIVE); uiTimer = 0; uiPhase = 0; break; } } else uiTimer -= uiDiff; } if (!UpdateVictim()) return; DoMeleeAttackIfReady(); } }; }; class mob_azure_saboteur : public CreatureScript { public: mob_azure_saboteur() : 
CreatureScript("mob_azure_saboteur") { } CreatureAI* GetAI(Creature* creature) const { return new mob_azure_saboteurAI (creature); } struct mob_azure_saboteurAI : public npc_escortAI { mob_azure_saboteurAI(Creature* c):npc_escortAI(c) { instance = c->GetInstanceScript(); bHasGotMovingPoints = false; uiBoss = 0; Reset(); } InstanceScript* instance; bool bHasGotMovingPoints; uint32 uiBoss; void Reset() { if (instance && !uiBoss) uiBoss = instance->GetData(DATA_WAVE_COUNT) == 6 ? instance->GetData(DATA_FIRST_BOSS) : instance->GetData(DATA_SECOND_BOSS); me->SetReactState(REACT_PASSIVE); me->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_IMMUNE_TO_PC|UNIT_FLAG_NON_ATTACKABLE); me->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NOT_SELECTABLE); } void WaypointReached(uint32 uiWPointId) { switch (uiBoss) { case 1: if (uiWPointId == 2) FinishPointReached(); break; case 2: if (uiWPointId == 2) FinishPointReached(); break; case 3: if (uiWPointId == 1) FinishPointReached(); break; case 4: if (uiWPointId == 0) FinishPointReached(); break; case 5: if (uiWPointId == 0) FinishPointReached(); break; case 6: if (uiWPointId == 4) FinishPointReached(); break; } } void UpdateAI(const uint32 diff) { if (instance && instance->GetData(DATA_MAIN_EVENT_PHASE) != IN_PROGRESS) me->CastStop(); npc_escortAI::UpdateAI(diff); if (!bHasGotMovingPoints) { bHasGotMovingPoints = true; switch (uiBoss) { case 1: for (int i=0;i<3;i++) AddWaypoint(i, SaboteurFinalPos1[i][0], SaboteurFinalPos1[i][1], SaboteurFinalPos1[i][2], 0); me->SetHomePosition(SaboteurFinalPos1[2][0], SaboteurFinalPos1[2][1], SaboteurFinalPos1[2][2], 4.762346f); break; case 2: for (int i=0;i<3;i++) AddWaypoint(i, SaboteurFinalPos2[i][0], SaboteurFinalPos2[i][1], SaboteurFinalPos2[i][2], 0); me->SetHomePosition(SaboteurFinalPos2[2][0], SaboteurFinalPos2[2][1], SaboteurFinalPos2[2][2], 1.862674f); break; case 3: for (int i=0;i<2;i++) AddWaypoint(i, SaboteurFinalPos3[i][0], SaboteurFinalPos3[i][1], SaboteurFinalPos3[i][2], 0); me->SetHomePosition(SaboteurFinalPos3[1][0], SaboteurFinalPos3[1][1], SaboteurFinalPos3[1][2], 5.500638f); break; case 4: AddWaypoint(0, SaboteurFinalPos4[0], SaboteurFinalPos4[1], SaboteurFinalPos4[2], 0); me->SetHomePosition(SaboteurFinalPos4[0], SaboteurFinalPos4[1], SaboteurFinalPos4[2], 3.991108f); break; case 5: AddWaypoint(0, SaboteurFinalPos5[0], SaboteurFinalPos5[1], SaboteurFinalPos5[2], 0); me->SetHomePosition(SaboteurFinalPos5[0], SaboteurFinalPos5[1], SaboteurFinalPos5[2], 1.100841f); break; case 6: for (int i=0;i<5;i++) AddWaypoint(i, SaboteurFinalPos6[i][0], SaboteurFinalPos6[i][1], SaboteurFinalPos6[i][2], 0); me->SetHomePosition(SaboteurFinalPos6[4][0], SaboteurFinalPos6[4][1], SaboteurFinalPos6[4][2], 0.983031f); break; } SetDespawnAtEnd(false); Start(true, true); } } void FinishPointReached() { me->CastSpell(me, SABOTEUR_SHIELD_DISRUPTION, false); me->DisappearAndDie(); Creature* pSaboPort = Unit::GetCreature((*me), instance->GetData64(DATA_SABOTEUR_PORTAL)); if (pSaboPort) pSaboPort->DisappearAndDie(); instance->SetData(DATA_START_BOSS_ENCOUNTER, 1); } }; }; class npc_teleportation_portal_vh : public CreatureScript { public: npc_teleportation_portal_vh() : CreatureScript("npc_teleportation_portal_vh") { } CreatureAI* GetAI(Creature* creature) const { return new npc_teleportation_portalAI(creature); } struct npc_teleportation_portalAI : public ScriptedAI { npc_teleportation_portalAI(Creature* c) : ScriptedAI(c), listOfMobs(me) { instance = c->GetInstanceScript(); uiTypeOfMobsPortal = urand(0, 1); // 0 - elite mobs 1 - portal guardian or 
portal keeper with regular mobs bPortalGuardianOrKeeperOrEliteSpawn = false; } uint32 uiSpawnTimer; bool bPortalGuardianOrKeeperOrEliteSpawn; uint8 uiTypeOfMobsPortal; SummonList listOfMobs; InstanceScript* instance; void Reset() { uiSpawnTimer = 10000; bPortalGuardianOrKeeperOrEliteSpawn = false; } void EnterCombat(Unit* /*who*/) {} void MoveInLineOfSight(Unit* /*who*/) {} void UpdateAI(const uint32 diff) { if (!instance) //Massive usage of instance, global check return; if (instance->GetData(DATA_REMOVE_NPC) == 1) { me->DespawnOrUnsummon(); instance->SetData(DATA_REMOVE_NPC, 0); } uint8 uiWaveCount = instance->GetData(DATA_WAVE_COUNT); if ((uiWaveCount == 6) || (uiWaveCount == 12)) //Don't spawn mobs on boss encounters return; switch (uiTypeOfMobsPortal) { // spawn elite mobs and then set portals visibility to make it look like it dissapeard case 0: if (!bPortalGuardianOrKeeperOrEliteSpawn) { if (uiSpawnTimer <= diff) { bPortalGuardianOrKeeperOrEliteSpawn = true; uint8 k = uiWaveCount < 12 ? 2 : 3; for (uint8 i = 0; i < k; ++i) { uint32 entry = RAND(CREATURE_AZURE_CAPTAIN, CREATURE_AZURE_RAIDER, CREATURE_AZURE_STALKER, CREATURE_AZURE_SORCEROR); DoSummon(entry, me, 2.0f, 20000, TEMPSUMMON_DEAD_DESPAWN); } me->SetVisible(false); } else uiSpawnTimer -= diff; } else { // if all spawned elites have died kill portal if (listOfMobs.empty()) { me->Kill(me, false); me->RemoveCorpse(); } } break; // spawn portal guardian or portal keeper with regular mobs case 1: if (uiSpawnTimer <= diff) { if (bPortalGuardianOrKeeperOrEliteSpawn) { uint8 k = instance->GetData(DATA_WAVE_COUNT) < 12 ? 3 : 4; for (uint8 i = 0; i < k; ++i) { uint32 entry = RAND(CREATURE_AZURE_INVADER_1, CREATURE_AZURE_INVADER_2, CREATURE_AZURE_SPELLBREAKER_1, CREATURE_AZURE_SPELLBREAKER_2, CREATURE_AZURE_MAGE_SLAYER_1, CREATURE_AZURE_MAGE_SLAYER_2, CREATURE_AZURE_BINDER_1, CREATURE_AZURE_BINDER_2); DoSummon(entry, me, 2.0f, 20000, TEMPSUMMON_DEAD_DESPAWN); } } else { bPortalGuardianOrKeeperOrEliteSpawn = true; uint32 entry = RAND(CREATURE_PORTAL_GUARDIAN, CREATURE_PORTAL_KEEPER); if (Creature* pPortalKeeper = DoSummon(entry, me, 2.0f, 0, TEMPSUMMON_DEAD_DESPAWN)) me->CastSpell(pPortalKeeper, SPELL_PORTAL_CHANNEL, false); } uiSpawnTimer = SPAWN_TIME; } else uiSpawnTimer -= diff; if (bPortalGuardianOrKeeperOrEliteSpawn && !me->IsNonMeleeSpellCasted(false)) { me->Kill(me, false); me->RemoveCorpse(); } break; } } void JustDied(Unit* /*killer*/) { if (instance) instance->SetData(DATA_WAVE_COUNT, instance->GetData(DATA_WAVE_COUNT)+1); } void JustSummoned(Creature* summoned) { listOfMobs.Summon(summoned); if (summoned) instance->SetData64(DATA_ADD_TRASH_MOB, summoned->GetGUID()); } void SummonedMobDied(Creature* summoned) { listOfMobs.Despawn(summoned); if (summoned) instance->SetData64(DATA_DEL_TRASH_MOB, summoned->GetGUID()); } }; }; struct violet_hold_trashAI : public npc_escortAI { violet_hold_trashAI(Creature* c):npc_escortAI(c) { instance = c->GetInstanceScript(); bHasGotMovingPoints = false; if (instance) portalLocationID = instance->GetData(DATA_PORTAL_LOCATION); Reset(); } public: InstanceScript* instance; bool bHasGotMovingPoints; uint32 portalLocationID; uint32 secondPortalRouteID; void WaypointReached(uint32 uiPointId) { switch (portalLocationID) { case 0: if (uiPointId == 5) CreatureStartAttackDoor(); break; case 1: if ((uiPointId == 8 && secondPortalRouteID == 0) || (uiPointId == 7 && secondPortalRouteID == 1)) CreatureStartAttackDoor(); break; case 2: if (uiPointId == 7) CreatureStartAttackDoor(); break; case 3: if (uiPointId 
== 8) CreatureStartAttackDoor(); break; case 4: if (uiPointId == 5) CreatureStartAttackDoor(); break; case 5: if (uiPointId == 3) CreatureStartAttackDoor(); break; } } void UpdateAI(const uint32) { if (instance && instance->GetData(DATA_MAIN_EVENT_PHASE) != IN_PROGRESS) me->CastStop(); if (!bHasGotMovingPoints) { bHasGotMovingPoints = true; switch (portalLocationID) { case 0: for (int i=0;i<6;i++) AddWaypoint(i, FirstPortalWPs[i][0]+irand(-1, 1), FirstPortalWPs[i][1]+irand(-1, 1), FirstPortalWPs[i][2]+irand(-1, 1), 0); me->SetHomePosition(FirstPortalWPs[5][0], FirstPortalWPs[5][1], FirstPortalWPs[5][2], 3.149439f); break; case 1: secondPortalRouteID = urand(0, 1); switch (secondPortalRouteID) { case 0: for (int i=0;i<9;i++) AddWaypoint(i, SecondPortalFirstWPs[i][0]+irand(-1, 1), SecondPortalFirstWPs[i][1]+irand(-1, 1), SecondPortalFirstWPs[i][2], 0); me->SetHomePosition(SecondPortalFirstWPs[8][0]+irand(-1, 1), SecondPortalFirstWPs[8][1]+irand(-1, 1), SecondPortalFirstWPs[8][2]+irand(-1, 1), 3.149439f); break; case 1: for (int i=0;i<8;i++) AddWaypoint(i, SecondPortalSecondWPs[i][0]+irand(-1, 1), SecondPortalSecondWPs[i][1]+irand(-1, 1), SecondPortalSecondWPs[i][2], 0); me->SetHomePosition(SecondPortalSecondWPs[7][0], SecondPortalSecondWPs[7][1], SecondPortalSecondWPs[7][2], 3.149439f); break; } break; case 2: for (int i=0;i<8;i++) AddWaypoint(i, ThirdPortalWPs[i][0]+irand(-1, 1), ThirdPortalWPs[i][1]+irand(-1, 1), ThirdPortalWPs[i][2], 0); me->SetHomePosition(ThirdPortalWPs[7][0], ThirdPortalWPs[7][1], ThirdPortalWPs[7][2], 3.149439f); break; case 3: for (int i=0;i<9;i++) AddWaypoint(i, FourthPortalWPs[i][0]+irand(-1, 1), FourthPortalWPs[i][1]+irand(-1, 1), FourthPortalWPs[i][2], 0); me->SetHomePosition(FourthPortalWPs[8][0], FourthPortalWPs[8][1], FourthPortalWPs[8][2], 3.149439f); break; case 4: for (int i=0;i<6;i++) AddWaypoint(i, FifthPortalWPs[i][0]+irand(-1, 1), FifthPortalWPs[i][1]+irand(-1, 1), FifthPortalWPs[i][2], 0); me->SetHomePosition(FifthPortalWPs[5][0], FifthPortalWPs[5][1], FifthPortalWPs[5][2], 3.149439f); break; case 5: for (int i=0;i<4;i++) AddWaypoint(i, SixthPoralWPs[i][0]+irand(-1, 1), SixthPoralWPs[i][1]+irand(-1, 1), SixthPoralWPs[i][2], 0); me->SetHomePosition(SixthPoralWPs[3][0], SixthPoralWPs[3][1], SixthPoralWPs[3][2], 3.149439f); break; } SetDespawnAtEnd(false); Start(true, true); } } void JustDied(Unit* /*unit*/) { if (Creature* portal = Unit::GetCreature((*me), instance->GetData64(DATA_TELEPORTATION_PORTAL))) CAST_AI(npc_teleportation_portal_vh::npc_teleportation_portalAI, portal->AI())->SummonedMobDied(me); if (instance) instance->SetData(DATA_NPC_PRESENCE_AT_DOOR_REMOVE, 1); } void CreatureStartAttackDoor() { me->SetReactState(REACT_PASSIVE); DoCast(SPELL_DESTROY_DOOR_SEAL); if (instance) instance->SetData(DATA_NPC_PRESENCE_AT_DOOR_ADD, 1); } }; class mob_azure_invader : public CreatureScript { public: mob_azure_invader() : CreatureScript("mob_azure_invader") { } CreatureAI* GetAI(Creature* creature) const { return new mob_azure_invaderAI (creature); } struct mob_azure_invaderAI : public violet_hold_trashAI { mob_azure_invaderAI(Creature* c) : violet_hold_trashAI(c) { instance = c->GetInstanceScript(); } uint32 uiCleaveTimer; uint32 uiImpaleTimer; uint32 uiBrutalStrikeTimer; uint32 uiSunderArmorTimer; void Reset() { uiCleaveTimer = 5000; uiImpaleTimer = 4000; uiBrutalStrikeTimer = 5000; uiSunderArmorTimer = 4000; } void UpdateAI(const uint32 diff) { violet_hold_trashAI::UpdateAI(diff); npc_escortAI::UpdateAI(diff); if (!UpdateVictim()) return; if 
(me->GetEntry() == CREATURE_AZURE_INVADER_1) { if (uiCleaveTimer <= diff) { DoCast(me->getVictim(), SPELL_CLEAVE); uiCleaveTimer = 5000; } else uiCleaveTimer -= diff; if (uiImpaleTimer <= diff) { Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100, true); if (target) DoCast(target, SPELL_IMPALE); uiImpaleTimer = 4000; } else uiImpaleTimer -= diff; } if (me->GetEntry() == CREATURE_AZURE_INVADER_2) { if (uiBrutalStrikeTimer <= diff) { DoCast(me->getVictim(), SPELL_BRUTAL_STRIKE); uiBrutalStrikeTimer = 5000; } else uiBrutalStrikeTimer -= diff; if (uiSunderArmorTimer <= diff) { DoCast(me->getVictim(), SPELL_SUNDER_ARMOR); uiSunderArmorTimer = urand(8000, 10000); } else uiSunderArmorTimer -= diff; DoMeleeAttackIfReady(); } DoMeleeAttackIfReady(); } }; }; class mob_azure_binder : public CreatureScript { public: mob_azure_binder() : CreatureScript("mob_azure_binder") { } CreatureAI* GetAI(Creature* creature) const { return new mob_azure_binderAI (creature); } struct mob_azure_binderAI : public violet_hold_trashAI { mob_azure_binderAI(Creature* c) : violet_hold_trashAI(c) { instance = c->GetInstanceScript(); } uint32 uiArcaneExplosionTimer; uint32 uiArcainBarrageTimer; uint32 uiFrostNovaTimer; uint32 uiFrostboltTimer; void Reset() { uiArcaneExplosionTimer = 5000; uiArcainBarrageTimer = 4000; uiFrostNovaTimer = 5000; uiFrostboltTimer = 4000; } void UpdateAI(const uint32 diff) { violet_hold_trashAI::UpdateAI(diff); npc_escortAI::UpdateAI(diff); if (!UpdateVictim()) return; if (me->GetEntry() == CREATURE_AZURE_BINDER_1) { if (uiArcaneExplosionTimer <= diff) { DoCast(DUNGEON_MODE(SPELL_ARCANE_EXPLOSION, H_SPELL_ARCANE_EXPLOSION)); uiArcaneExplosionTimer = 5000; } else uiArcaneExplosionTimer -= diff; if (uiArcainBarrageTimer <= diff) { Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100, true); if (target) DoCast(target, DUNGEON_MODE(SPELL_ARCANE_BARRAGE, H_SPELL_ARCANE_BARRAGE)); uiArcainBarrageTimer = 6000; } else uiArcainBarrageTimer -= diff; } if (me->GetEntry() == CREATURE_AZURE_BINDER_2) { if (uiFrostNovaTimer <= diff) { DoCast(DUNGEON_MODE(SPELL_FROST_NOVA, H_SPELL_FROST_NOVA)); uiFrostNovaTimer = 5000; } else uiFrostNovaTimer -= diff; if (uiFrostboltTimer <= diff) { Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100, true); if (target) DoCast(target, DUNGEON_MODE(SPELL_FROSTBOLT, H_SPELL_FROSTBOLT)); uiFrostboltTimer = 6000; } else uiFrostboltTimer -= diff; } DoMeleeAttackIfReady(); } }; }; class mob_azure_mage_slayer : public CreatureScript { public: mob_azure_mage_slayer() : CreatureScript("mob_azure_mage_slayer") { } CreatureAI* GetAI(Creature* creature) const { return new mob_azure_mage_slayerAI (creature); } struct mob_azure_mage_slayerAI : public violet_hold_trashAI { mob_azure_mage_slayerAI(Creature* c) : violet_hold_trashAI(c) { instance = c->GetInstanceScript(); } uint32 uiArcaneEmpowermentTimer; uint32 uiSpellLockTimer; void Reset() { uiArcaneEmpowermentTimer = 5000; uiSpellLockTimer = 5000; } void UpdateAI(const uint32 diff)<|fim▁hole|> violet_hold_trashAI::UpdateAI(diff); npc_escortAI::UpdateAI(diff); if (!UpdateVictim()) return; if (me->GetEntry() == CREATURE_AZURE_MAGE_SLAYER_1) { if (uiArcaneEmpowermentTimer <= diff) { DoCast(me, SPELL_ARCANE_EMPOWERMENT); uiArcaneEmpowermentTimer = 14000; } else uiArcaneEmpowermentTimer -= diff; } if (me->GetEntry() == CREATURE_AZURE_MAGE_SLAYER_2) { if (uiSpellLockTimer <= diff) { Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100, true); if (target) DoCast(target, SPELL_SPELL_LOCK); uiSpellLockTimer = 9000; } else 
uiSpellLockTimer -= diff; } DoMeleeAttackIfReady(); } }; }; class mob_azure_raider : public CreatureScript { public: mob_azure_raider() : CreatureScript("mob_azure_raider") { } CreatureAI* GetAI(Creature* creature) const { return new mob_azure_raiderAI (creature); } struct mob_azure_raiderAI : public violet_hold_trashAI { mob_azure_raiderAI(Creature* c) : violet_hold_trashAI(c) { instance = c->GetInstanceScript(); } uint32 uiConcussionBlowTimer; uint32 uiMagicReflectionTimer; void Reset() { uiConcussionBlowTimer = 5000; uiMagicReflectionTimer = 8000; } void UpdateAI(const uint32 diff) { violet_hold_trashAI::UpdateAI(diff); npc_escortAI::UpdateAI(diff); if (!UpdateVictim()) return; if (uiConcussionBlowTimer <= diff) { DoCast(me->getVictim(), SPELL_CONCUSSION_BLOW); uiConcussionBlowTimer = 5000; } else uiConcussionBlowTimer -= diff; if (uiMagicReflectionTimer <= diff) { DoCast(SPELL_MAGIC_REFLECTION); uiMagicReflectionTimer = urand(10000, 15000); } else uiMagicReflectionTimer -= diff; DoMeleeAttackIfReady(); } }; }; class mob_azure_stalker : public CreatureScript { public: mob_azure_stalker() : CreatureScript("mob_azure_stalker") { } CreatureAI* GetAI(Creature* creature) const { return new mob_azure_stalkerAI (creature); } struct mob_azure_stalkerAI : public violet_hold_trashAI { mob_azure_stalkerAI(Creature* c) : violet_hold_trashAI(c) { instance = c->GetInstanceScript(); } uint32 uiBackstabTimer; uint32 uiTacticalBlinkTimer; bool TacticalBlinkCasted; void Reset() { uiBackstabTimer = 1300; uiTacticalBlinkTimer = 8000; TacticalBlinkCasted =false; } void UpdateAI(const uint32 diff) { violet_hold_trashAI::UpdateAI(diff); npc_escortAI::UpdateAI(diff); if (!UpdateVictim()) return; if (!TacticalBlinkCasted) { if (uiTacticalBlinkTimer <= diff) { Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 40, true); if (target) DoCast(target, SPELL_TACTICAL_BLINK); uiTacticalBlinkTimer = 6000; TacticalBlinkCasted = true; } else uiTacticalBlinkTimer -= diff; } else { if (uiBackstabTimer <= diff) { Unit* target = SelectTarget(SELECT_TARGET_NEAREST, 0, 10, true); DoCast(target, SPELL_BACKSTAB); TacticalBlinkCasted = false; uiBackstabTimer =1300; } else uiBackstabTimer -= diff; } DoMeleeAttackIfReady(); } }; }; class mob_azure_spellbreaker : public CreatureScript { public: mob_azure_spellbreaker() : CreatureScript("mob_azure_spellbreaker") { } struct mob_azure_spellbreakerAI : public violet_hold_trashAI { mob_azure_spellbreakerAI(Creature* c) : violet_hold_trashAI(c) { instance = c->GetInstanceScript(); } uint32 uiArcaneBlastTimer; uint32 uiSlowTimer; uint32 uiChainsOfIceTimer; uint32 uiConeOfColdTimer; void Reset() { uiArcaneBlastTimer = 5000; uiSlowTimer = 4000; uiChainsOfIceTimer = 5000; uiConeOfColdTimer = 4000; } void UpdateAI(const uint32 diff) { violet_hold_trashAI::UpdateAI(diff); npc_escortAI::UpdateAI(diff); if (!UpdateVictim()) return; if (me->GetEntry() == CREATURE_AZURE_SPELLBREAKER_1) { if (uiArcaneBlastTimer <= diff) { Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100, true); if (target) DoCast(target, DUNGEON_MODE(SPELL_ARCANE_BLAST, H_SPELL_ARCANE_BLAST)); uiArcaneBlastTimer = 6000; } else uiArcaneBlastTimer -= diff; if (uiSlowTimer <= diff) { Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100, true); if (target) DoCast(target, SPELL_SLOW); uiSlowTimer = 5000; } else uiSlowTimer -= diff; } if (me->GetEntry() == CREATURE_AZURE_SPELLBREAKER_2) { if (uiChainsOfIceTimer <= diff) { Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100, true); if (target) DoCast(target, 
SPELL_CHAINS_OF_ICE); uiChainsOfIceTimer = 7000; } else uiChainsOfIceTimer -= diff; if (uiConeOfColdTimer <= diff) { DoCast(DUNGEON_MODE(SPELL_CONE_OF_COLD, H_SPELL_CONE_OF_COLD)); uiConeOfColdTimer = 5000; } else uiConeOfColdTimer -= diff; } DoMeleeAttackIfReady(); } }; CreatureAI* GetAI(Creature* creature) const { return new mob_azure_spellbreakerAI (creature); } }; class mob_azure_captain : public CreatureScript { public: mob_azure_captain() : CreatureScript("mob_azure_captain") { } CreatureAI* GetAI(Creature* creature) const { return new mob_azure_captainAI (creature); } struct mob_azure_captainAI : public violet_hold_trashAI { mob_azure_captainAI(Creature* c) : violet_hold_trashAI(c) { instance = c->GetInstanceScript(); } uint32 uiMortalStrikeTimer; uint32 uiWhirlwindTimer; void Reset() { uiMortalStrikeTimer = 5000; uiWhirlwindTimer = 8000; } void UpdateAI(const uint32 diff) { violet_hold_trashAI::UpdateAI(diff); npc_escortAI::UpdateAI(diff); if (!UpdateVictim()) return; if (uiMortalStrikeTimer <= diff) { DoCast(me->getVictim(), SPELL_MORTAL_STRIKE); uiMortalStrikeTimer = 5000; } else uiMortalStrikeTimer -= diff; if (uiWhirlwindTimer <= diff) { DoCast(me, SPELL_WHIRLWIND_OF_STEEL); uiWhirlwindTimer = 8000; } else uiWhirlwindTimer -= diff; DoMeleeAttackIfReady(); } }; }; class mob_azure_sorceror : public CreatureScript { public: mob_azure_sorceror() : CreatureScript("mob_azure_sorceror") { } CreatureAI* GetAI(Creature* creature) const { return new mob_azure_sorcerorAI (creature); } struct mob_azure_sorcerorAI : public violet_hold_trashAI { mob_azure_sorcerorAI(Creature* c) : violet_hold_trashAI(c) { instance = c->GetInstanceScript(); } uint32 uiArcaneStreamTimer; uint32 uiArcaneStreamTimerStartingValueHolder; uint32 uiManaDetonationTimer; void Reset() { uiArcaneStreamTimer = 4000; uiArcaneStreamTimerStartingValueHolder = uiArcaneStreamTimer; uiManaDetonationTimer = 5000; } void UpdateAI(const uint32 diff) { violet_hold_trashAI::UpdateAI(diff); npc_escortAI::UpdateAI(diff); if (!UpdateVictim()) return; if (uiArcaneStreamTimer <= diff) { Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100, true); if (target) DoCast(target, DUNGEON_MODE(SPELL_ARCANE_STREAM, H_SPELL_ARCANE_STREAM)); uiArcaneStreamTimer = urand(0, 5000)+5000; uiArcaneStreamTimerStartingValueHolder = uiArcaneStreamTimer; } else uiArcaneStreamTimer -= diff; if (uiManaDetonationTimer <= diff && uiArcaneStreamTimer >=1500 && uiArcaneStreamTimer <= uiArcaneStreamTimerStartingValueHolder/2) { DoCast(DUNGEON_MODE(SPELL_MANA_DETONATION, H_SPELL_MANA_DETONATION)); uiManaDetonationTimer = urand(2000, 6000); } else uiManaDetonationTimer -= diff; DoMeleeAttackIfReady(); } }; }; void AddSC_violet_hold() { new npc_sinclari_vh(); new npc_teleportation_portal_vh(); new mob_azure_invader(); new mob_azure_spellbreaker(); new mob_azure_binder(); new mob_azure_mage_slayer(); new mob_azure_captain(); new mob_azure_sorceror(); new mob_azure_raider(); new mob_azure_stalker(); new mob_azure_saboteur(); }<|fim▁end|>
{
<|file_name|>font.mako.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ <%namespace name="helpers" file="/helpers.mako.rs" /> <%helpers:shorthand name="font" sub_properties="font-style font-variant font-weight font-stretch font-size line-height font-family ${'font-size-adjust' if product == 'gecko' else ''} ${'font-kerning' if product == 'gecko' else ''} ${'font-variant-caps' if product == 'gecko' else ''} ${'font-variant-position' if product == 'gecko' else ''} ${'font-language-override' if product == 'none' else ''}" spec="https://drafts.csswg.org/css-fonts-3/#propdef-font"> use parser::Parse; use properties::longhands::{font_style, font_variant, font_weight, font_stretch}; use properties::longhands::{font_size, line_height, font_family}; use properties::longhands::font_family::computed_value::FontFamily; pub fn parse_value(context: &ParserContext, input: &mut Parser) -> Result<Longhands, ()> { let mut nb_normals = 0; let mut style = None; let mut variant = None; let mut weight = None; let mut stretch = None; let size; loop { // Special-case 'normal' because it is valid in each of // font-style, font-weight, font-variant and font-stretch. // Leaves the values to None, 'normal' is the initial value for each of them. if input.try(|input| input.expect_ident_matching("normal")).is_ok() { nb_normals += 1; continue; } if style.is_none() { if let Ok(value) = input.try(|input| font_style::parse(context, input)) { style = Some(value); continue } } if weight.is_none() {<|fim▁hole|> continue } } if variant.is_none() { if let Ok(value) = input.try(|input| font_variant::parse(context, input)) { variant = Some(value); continue } } if stretch.is_none() { if let Ok(value) = input.try(|input| font_stretch::parse(context, input)) { stretch = Some(value); continue } } size = Some(try!(font_size::parse(context, input))); break } #[inline] fn count<T>(opt: &Option<T>) -> u8 { if opt.is_some() { 1 } else { 0 } } if size.is_none() || (count(&style) + count(&weight) + count(&variant) + count(&stretch) + nb_normals) > 4 { return Err(()) } let line_height = if input.try(|input| input.expect_delim('/')).is_ok() { Some(try!(line_height::parse(context, input))) } else { None }; let family = Vec::<FontFamily>::parse(context, input)?; Ok(Longhands { font_style: style, font_variant: variant, font_weight: weight, font_stretch: stretch, font_size: size, line_height: line_height, font_family: Some(font_family::SpecifiedValue(family)), % if product == "gecko": font_size_adjust: None, font_kerning: None, font_variant_caps: None, font_variant_position: None, % endif % if product == "none": font_language_override: None, % endif }) } // This may be a bit off, unsure, possibly needs changes impl<'a> LonghandsToSerialize<'a> { fn to_css_declared<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { if let DeclaredValue::Value(ref style) = *self.font_style { try!(style.to_css(dest)); try!(write!(dest, " ")); } if let DeclaredValue::Value(ref variant) = *self.font_variant { try!(variant.to_css(dest)); try!(write!(dest, " ")); } if let DeclaredValue::Value(ref weight) = *self.font_weight { try!(weight.to_css(dest)); try!(write!(dest, " ")); } if let DeclaredValue::Value(ref stretch) = *self.font_stretch { try!(stretch.to_css(dest)); try!(write!(dest, " ")); } try!(self.font_size.to_css(dest)); if let DeclaredValue::Value(ref height) = 
*self.line_height { match *height { line_height::SpecifiedValue::Normal => {}, _ => { try!(write!(dest, "/")); try!(height.to_css(dest)); } } } try!(write!(dest, " ")); self.font_family.to_css(dest) } } </%helpers:shorthand><|fim▁end|>
            if let Ok(value) = input.try(|input| font_weight::parse(context, input)) {
                weight = Some(value);
<|file_name|>clustering.cpp<|end_file_name|><|fim▁begin|>/** * The Seeks proxy and plugin framework are part of the SEEKS project. * Copyright (C) 2009 Emmanuel Benazera, [email protected] * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "clustering.h" #include "seeks_proxy.h" #include "errlog.h" #include "miscutil.h" #include "lsh_configuration.h" using sp::errlog; using sp::miscutil; using sp::seeks_proxy; using lsh::lsh_configuration; using lsh::stopwordlist; namespace seeks_plugins { /*- centroid. -*/ centroid::centroid() { } /*- cluster. -*/ cluster::cluster() :_rank(0.0) { } void cluster::add_point(const uint32_t &id, hash_map<uint32_t,float,id_hash_uint> *p) { hash_map<uint32_t,hash_map<uint32_t,float,id_hash_uint>*,id_hash_uint>::iterator hit; if ((hit=_cpoints.find(id))!=_cpoints.end()) { errlog::log_error(LOG_LEVEL_ERROR, "Trying to add a snippet multiple times to the same cluster"); } else _cpoints.insert(std::pair<uint32_t,hash_map<uint32_t,float,id_hash_uint>*>(id,p)); } void cluster::compute_rank(const query_context *qc) { _rank = 0.0; hash_map<uint32_t,hash_map<uint32_t,float,id_hash_uint>*,id_hash_uint>::const_iterator hit = _cpoints.begin(); while (hit!=_cpoints.end()) { search_snippet *sp = qc->get_cached_snippet((*hit).first); _rank += sp->_seeks_rank; // summing up the seeks rank yields a cluster rank (we could use the mean instead). ++hit; } } void cluster::compute_label(const query_context *qc) { // compute total tf-idf weight for features of docs belonging to this cluster. hash_map<uint32_t,float,id_hash_uint> f_totals; hash_map<uint32_t,float,id_hash_uint>::iterator fhit,hit2; hash_map<uint32_t,hash_map<uint32_t,float,id_hash_uint>*,id_hash_uint>::const_iterator hit = _cpoints.begin(); while (hit!=_cpoints.end()) { hit2 = (*hit).second->begin(); while (hit2!=(*hit).second->end()) { if ((fhit=f_totals.find((*hit2).first))!=f_totals.end()) { (*fhit).second += (*hit2).second; } else f_totals.insert(std::pair<uint32_t,float>((*hit2).first,(*hit2).second)); ++hit2; }<|fim▁hole|> ++hit; } // grab features with the highest tf-idf weight. std::map<float,uint32_t,std::greater<float> > f_mtotals; fhit = f_totals.begin(); while (fhit!=f_totals.end()) { f_mtotals.insert(std::pair<float,uint32_t>((*fhit).second,(*fhit).first)); ++fhit; } f_totals.clear(); // we need query words and a stopword list for rejecting labels. std::vector<std::string> words; miscutil::tokenize(qc->_query,words," "); size_t nwords = words.size(); stopwordlist *swl = seeks_proxy::_lsh_config->get_wordlist(qc->_auto_lang); // turn features into word labels. int k=0; int KW = 2; // number of words per label. TODO: use weights for less/more words... 
std::map<float,uint32_t,std::greater<float> >::iterator mit = f_mtotals.begin(); while (mit!=f_mtotals.end()) { bool found = false; if (k>KW) break; else { hit = _cpoints.begin(); while (hit!=_cpoints.end()) { uint32_t id = (*hit).first; search_snippet *sp = qc->get_cached_snippet(id); hash_map<uint32_t,std::string,id_hash_uint>::const_iterator bit; if ((bit=sp->_bag_of_words->find((*mit).second))!=sp->_bag_of_words->end()) { // two checks needed: whether the word already belongs to the query // (can happen after successive use of cluster label queries); // whether the word belongs to the english stop word list (because // whatever the query language, some english results sometimes gets in). bool reject = false; for (size_t i=0; i<nwords; i++) { if (words.at(i) == (*bit).second) // check against query word. { reject = true; break; } } if (!reject) { reject = swl->has_word((*bit).second); // check against the english stopword list. } if (reject) { ++hit; continue; } /* std::cerr << "adding to label: " << (*bit).second << " --> " << (*mit).first << std::endl; */ if (!_label.empty()) _label += " "; _label += (*bit).second; found = true; break; } ++hit; } } ++mit; if (found) k++; } //std::cerr << "label: " << _label << std::endl; } /*- clustering. -*/ clustering::clustering() :_qc(NULL),_K(0),_clusters(NULL),_cluster_labels(NULL) { } clustering::clustering(query_context *qc, const std::vector<search_snippet*> &snippets, const short &K) :_qc(qc),_K(K),_snippets(snippets) { _clusters = new cluster[_K]; _cluster_labels = new std::vector<std::string>[_K]; // setup points and dimensions. size_t nsp = _snippets.size(); for (size_t s=0; s<nsp; s++) { search_snippet *sp = _snippets.at(s); if (sp->_features_tfidf) _points.insert(std::pair<uint32_t,hash_map<uint32_t,float,id_hash_uint>*>(sp->_id,sp->_features_tfidf)); } } clustering::~clustering() { if (_clusters) delete[] _clusters; if (_cluster_labels) delete[] _cluster_labels; }; void clustering::post_processing() { // rank snippets within clusters. rank_clusters_elements(); // rank clusters. compute_clusters_rank(); // sort clusters. std::stable_sort(_clusters,_clusters+_K,cluster::max_rank_cluster); // compute labels. compute_cluster_labels(); } // default ranking is as computed by seeks on the main list of results. void clustering::rank_elements(cluster &cl) { hash_map<uint32_t,hash_map<uint32_t,float,id_hash_uint>*,id_hash_uint>::iterator hit = cl._cpoints.begin(); while (hit!=cl._cpoints.end()) { search_snippet *sp = _qc->get_cached_snippet((*hit).first); sp->_seeks_ir = sp->_seeks_rank; ++hit; } } void clustering::rank_clusters_elements() { for (short c=0; c<_K; c++) rank_elements(_clusters[c]); } void clustering::compute_clusters_rank() { for (short c=0; c<_K; c++) _clusters[c].compute_rank(_qc); } void clustering::compute_cluster_labels() { for (short c=0; c<_K; c++) _clusters[c].compute_label(_qc); } hash_map<uint32_t,float,id_hash_uint>* clustering::get_point_features(const short &np) { short p = 0; hash_map<uint32_t,hash_map<uint32_t,float,id_hash_uint>*,id_hash_uint>::const_iterator hit = _points.begin(); while (hit!=_points.end()) { if (p == np) return (*hit).second; p++; ++hit; } return NULL; } } /* end of namespace. */<|fim▁end|>
<|file_name|>camera.py<|end_file_name|><|fim▁begin|># ============================================================================= # Federal University of Rio Grande do Sul (UFRGS) # Connectionist Artificial Intelligence Laboratory (LIAC) # Renato de Pontes Pereira - [email protected] # ============================================================================= # Copyright (c) 2011 Renato de Pontes Pereira, renato.ppontes at gmail dot com # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # ============================================================================= from OpenGL.GL import * from OpenGL.GLU import * from OpenGL.GLUT import * import psi from psi.calc import clip<|fim▁hole|>from psi.euclid import Vector2 __all__ = ['Camera'] class Camera(object): def __init__(self, pos=Vector2(0, 0)): self.pos = pos self.half_size = Vector2(300, 300) self.zoom = 1.0 self._zoom_step = 0.5 self._scale_rate = 1/self.zoom def adjust(self, old_scale, new_scale): pass def zoom_out(self): self.zoom = clip(self.zoom+self._zoom_step, self._zoom_step, 10.5) old = self._scale_rate self._scale_rate = 1/self.zoom self.adjust(old, self._scale_rate) def zoom_in(self): self.zoom = clip(self.zoom-self._zoom_step, self._zoom_step, 10.5) old = self._scale_rate self._scale_rate = 1/self.zoom self.adjust(old, self._scale_rate) def reset_zoom(self): self.zoom = 1. self._scale_rate = 1/self.zoom def pan(self, delta): self.pos += delta def locate(self): glTranslatef(-self.pos.x+self.half_size.x, -self.pos.y+self.half_size.y, 0) glScalef(self._scale_rate, self._scale_rate, 0) def on_window_resize(self, size): half_size = size/2. diff = self.half_size - half_size # print self.half_size, '=>', half_size, '=', diff self.half_size = half_size # self.pan(-diff/4.)<|fim▁end|>
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|>class Provider(PhoneNumberProvider): formats = ( # Mobile # Government website: http://www.uke.gov.pl/numeracja-843 '50# ### ###', '51# ### ###', '53# ### ###', '57# ### ###', '60# ### ###', '66# ### ###', '69# ### ###', '72# ### ###', '73# ### ###', '78# ### ###', '79# ### ###', '88# ### ###', '+48 50# ### ###', '+48 51# ### ###', '+48 53# ### ###', '+48 57# ### ###', '+48 60# ### ###', '+48 66# ### ###', '+48 69# ### ###', '+48 72# ### ###', '+48 73# ### ###', '+48 78# ### ###', '+48 79# ### ###', '+48 88# ### ###', '32 ### ## ##', '+48 32 ### ## ##', '22 ### ## ##', '+48 22 ### ## ##', )<|fim▁end|>
from __future__ import unicode_literals from .. import Provider as PhoneNumberProvider
<|file_name|>Tx3gParser.java<|end_file_name|><|fim▁begin|>/* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.android.exoplayer.text.tx3g; import com.google.android.exoplayer.text.Cue; import com.google.android.exoplayer.text.Subtitle; import com.google.android.exoplayer.text.SubtitleParser; import com.google.android.exoplayer.util.MimeTypes; import com.google.android.exoplayer.util.ParsableByteArray; /** * A {@link SubtitleParser} for tx3g. * <p> * Currently only supports parsing of a single text track. */ public final class Tx3gParser implements SubtitleParser { private final ParsableByteArray parsableByteArray; public Tx3gParser() { parsableByteArray = new ParsableByteArray(); } @Override public boolean canParse(String mimeType) { return MimeTypes.APPLICATION_TX3G.equals(mimeType); } @Override<|fim▁hole|> public Subtitle parse(byte[] bytes, int offset, int length) { parsableByteArray.reset(bytes, length); int textLength = parsableByteArray.readUnsignedShort(); if (textLength == 0) { return Tx3gSubtitle.EMPTY; } String cueText = parsableByteArray.readString(textLength); return new Tx3gSubtitle(new Cue(cueText)); } }<|fim▁end|>
<|file_name|>compressed_sparsity_pattern_05.cc<|end_file_name|><|fim▁begin|>// --------------------------------------------------------------------- // // Copyright (C) 2008 - 2014 by the deal.II authors // // This file is part of the deal.II library. // // The deal.II library is free software; you can use it, redistribute // it, and/or modify it under the terms of the GNU Lesser General // Public License as published by the Free Software Foundation; either // version 2.1 of the License, or (at your option) any later version. // The full text of the license can be found in the file LICENSE at // the top level of the deal.II distribution. // // --------------------------------------------------------------------- // check CompressedSparsityPattern::copy constructor with offdiagonals #include "sparsity_pattern_common.h" int main () { std::ofstream logfile("output"); logfile.setf(std::ios::fixed); deallog << std::setprecision(3); deallog.attach(logfile); deallog.depth_console(0); deallog.threshold_double(1.e-10); <|fim▁hole|><|fim▁end|>
copy_with_offdiagonals_2<CompressedSparsityPattern> (); }
<|file_name|>project.py<|end_file_name|><|fim▁begin|>from pprint import pformat from sqlalchemy import Column, ForeignKey, orm from sqlalchemy.types import String, Integer, Boolean, Text from sqlalchemy.schema import UniqueConstraint from intranet3 import memcache from intranet3.models import Base, User from intranet3.log import WARN_LOG, INFO_LOG, DEBUG_LOG LOG = INFO_LOG(__name__) DEBUG = DEBUG_LOG(__name__) WARN = WARN_LOG(__name__) SELECTOR_CACHE_KEY = 'SELECTORS_FOR_TRACKER_%s' STATUS = [ ('1', 'Initialization'), ('2', 'Analysis'), ('3', 'Conception'), ('4', 'Realization'), ('5', 'Support'), ('6', 'Closed'), ] def bugzilla_bug_list(tracker_url, bug_ids, project_selector=None): query = '&'.join(['bug_id=%s' % bug_id for bug_id in bug_ids]) return tracker_url + '/buglist.cgi?%s' % query def unfuddle_bug_list(tracker_url, bug_ids, project_selector=None): suffix = '/a#/projects/%s/ticket_reports/dynamic?conditions_string=%s' query = '|'.join(['number-eq-%s' % bug_id for bug_id in bug_ids]) return tracker_url + (suffix % (project_selector, query)) class Project(Base): __tablename__ = 'project' BUG_LIST_URL_CONTRUCTORS = { 'bugzilla': bugzilla_bug_list, 'rockzilla': bugzilla_bug_list, 'igozilla': bugzilla_bug_list, 'trac': lambda *args: '#', 'cookie_trac': lambda *args: '#', 'bitbucket': lambda *args: '#', 'pivotaltracker': lambda *args: '#', 'unfuddle': unfuddle_bug_list, } id = Column(Integer, primary_key=True, index=True) name = Column(String, nullable=False) coordinator_id = Column(Integer, ForeignKey('user.id'), nullable=True, index=True) client_id = Column(Integer, ForeignKey('client.id'), nullable=False, index=True) tracker_id = Column(Integer, ForeignKey('tracker.id'), nullable=False, index=True) turn_off_selectors = Column(Boolean, nullable=False, default=False) project_selector = Column(String, nullable=True) component_selector = Column(String, nullable=True) ticket_id_selector = Column(String, nullable=True) version_selector = Column(String, nullable=True) active = Column(Boolean, nullable=False) time_entries = orm.relationship('TimeEntry', backref='project', lazy='dynamic')<|fim▁hole|> google_wiki = Column(String, nullable=True) status = Column(Integer, nullable=True) mailing_url = Column(String, nullable=True) working_agreement = Column(Text, nullable=False, default='') definition_of_done = Column(Text, nullable=False, default='') definition_of_ready = Column(Text, nullable=False, default='') continuous_integration_url = Column(String, nullable=False, default='') backlog_url = Column(String, nullable=False, default='') __table_args__ = (UniqueConstraint('name', 'client_id', name='project_name_client_id_unique'), {}) def format_selector(self): if self.turn_off_selectors: return u'Turned off' if self.ticket_id_selector: return u'Tickets: %s' % (self.ticket_id_selector, ) else: return u'%s / %s / %s' % ( self.project_selector or u'*', self.component_selector or u'*', self.version_selector or u'*', ) def get_selector_tuple(self): """ Returns selector tuple ([ticket_ids], project_selector, component_selector) """ ticket_ids = [ int(v.strip()) for v in self.ticket_id_selector.split(',') ] if self.ticket_id_selector else None components = [ v.strip() for v in self.component_selector.split(',') ] if self.component_selector else [] versions = [ v.strip() for v in self.version_selector.split(',') ] if self.version_selector else [] return ( ticket_ids, self.project_selector, components, versions, ) def get_new_bug_url(self): """ Returns url for create new bug in project """ 
component_selector = self.component_selector if self.component_selector is not None and not self.component_selector.count(',') else None return self.tracker.get_new_bug_url(self.project_selector, component_selector) def get_bug_list_url(self, bug_ids): constructor = self.BUG_LIST_URL_CONTRUCTORS[self.tracker.type] return constructor(self.tracker.url, bug_ids, self.project_selector) @property def status_name(self): if self.status and len(STATUS) >= self.status: return STATUS[self.status-1][1] return None @property def coordinator(self): if self.coordinator_id is not None: return User.query.filter(User.id==self.coordinator_id).one() else: return self.client.coordinator class SelectorMapping(object): """ Simple storage for cached project selectors """ def __init__(self, tracker): """ Creates a selector mapping for given tracker None -> project_id project_name -> project_id (project_name, component_name) -> project_id """ self.tracker = tracker self.by_ticket_id = {} self.default = None self.by_project = {} # key: project_name self.by_component = {} # key: project_name, component_name self.by_version = {} # key: project_name, version self.by_component_version = {} # key: project_name, component_name, version cache_key = SELECTOR_CACHE_KEY % tracker.id mapping = memcache.get(cache_key) if mapping: self.clone(mapping) return projects = Project.query.filter(Project.tracker_id == tracker.id) \ .filter(Project.turn_off_selectors == False) \ .filter(Project.active == True) self.projects = dict([(project.id, project.name) for project in projects]) for project in projects: self._create_for_project(project) memcache.set(cache_key, self) DEBUG('Created selector mapping for tracker %s: %s, %s' % ( tracker.id, pformat(self.by_ticket_id), pformat(self.by_component)) ) def clone(self, mapping): self.default = mapping.default self.by_project = mapping.by_project self.by_component = mapping.by_component self.by_version = mapping.by_version self.by_component_version = mapping.by_component_version def _check_ticket_id_existance(self, ticket_id): if ticket_id in self.by_ticket_id: WARN(u'Overriding ticket ID for tracker from %s to %s' % ( self.by_ticket_id[ticket_id], ticket_id)) def _check_project_component_existance(self, project_component, project): """ Warn if we override a project """ if project_component is None: if None in self.by_component: WARN(u'Overriding default project for tracker [%s] from [%s] to [%s]' % ( self.tracker.name, self.projects[self.by_component[None]], project.name )) elif isinstance(project_component, (str, unicode)): project_name = project_component if project_name in self.by_component: WARN(u'Overriding project [%s] for tracker [%s] from [%s] to [%s]' % ( project_name, self.tracker.name, self.projects[self.by_component[project_name]], project.name )) else: project_name, component_name = project_component if (project_name, component_name) in self.by_component: WARN(u'Overriding project [%s] and component [%s] for tracker [%s] from [%s] to [%s]' % ( project_name, component_name, self.tracker.name, self.projects[self.by_component[(project_name, component_name)]], project.name )) def _create_for_project(self, project): ticket_ids, project_name, component_names, versions = project.get_selector_tuple() if ticket_ids: for ticket_id in ticket_ids: self._check_ticket_id_existance(ticket_id) self.by_ticket_id[ticket_id] = project.id # brak # tylko projekt # projekt + komponent # projekt + wersja # projekt + komponent + wersja if not project_name: # brak 
self._check_project_component_existance(None, project) self.default = project.id elif not component_names: if versions: # projekt + wersja for version in versions: self.by_version[(project_name, version)] = project.id else: # tylko projekt self._check_project_component_existance(project_name, project) self.by_project[project_name] = project.id elif not versions: # projekt + komponent for component_name in component_names: self._check_project_component_existance((project_name, component_name), project) self.by_component[(project_name, component_name)] = project.id else: # projekt + komponent + wersja for component_name in component_names: for version in versions: self.by_component_version[(project_name, component_name, version)] = project.id def match(self, id_, project, component, version=None): if id_ in self.by_ticket_id: return self.by_ticket_id[id_] project_id = self.by_component_version.get((project, component, version)) if project_id: return project_id project_id = self.by_component.get((project, component)) if project_id: return project_id project_id = self.by_version.get((project, version)) if project_id: return project_id project_id = self.by_project.get(project) if project_id: return project_id if self.default: return self.default WARN(u'map_to_project: Mapping to project/component/tracker %s/%s/%s failed' % (project, component, self.tracker.name)) @staticmethod def invalidate_for(tracker_id): memcache.delete(SELECTOR_CACHE_KEY % tracker_id) DEBUG(u'Invalidated selector mapping cache for tracker %s' % (tracker_id, ))<|fim▁end|>
sprints = orm.relationship('Sprint', backref='project', lazy='dynamic') google_card = Column(String, nullable=True)
<|file_name|>analytic_account_open.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Copyright 2015 Eficent - Jordi Ballester Alomar # License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html). from odoo import api, fields, models class AnalyticAccountOpen(models.TransientModel): _name = 'analytic.account.open' _description = 'Open single analytic account' analytic_account_id = fields.Many2one( 'account.analytic.account', 'Analytic Account', required=True ) include_child = fields.Boolean( 'Include child accounts', default=True ) @api.model def _get_child_analytic_accounts(self, curr_id): result = {} result[curr_id] = True # Now add the children self.env.cr.execute(''' WITH RECURSIVE children AS ( SELECT parent_id, id FROM account_analytic_account WHERE parent_id = %s UNION ALL SELECT a.parent_id, a.id FROM account_analytic_account a JOIN children b ON(a.parent_id = b.id) ) SELECT * FROM children order by parent_id ''', (curr_id,)) res = self.env.cr.fetchall() for x, y in res: result[y] = True return result @api.multi def analytic_account_open_window(self): self.ensure_one() act_window_id = self.env.ref( 'analytic.action_account_analytic_account_form') result = act_window_id.read()[0]<|fim▁hole|> else: acc_ids.append(acc_id) result['domain'] = "[('id','in', ["+','.join(map(str, acc_ids))+"])]" return result<|fim▁end|>
acc_id = self.analytic_account_id.id acc_ids = [] if self.include_child: acc_ids = self._get_child_analytic_accounts(acc_id)
<|file_name|>sync.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /// Synchronous channels/ports /// /// This channel implementation differs significantly from the asynchronous /// implementations found next to it (oneshot/stream/share). This is an /// implementation of a synchronous, bounded buffer channel. /// /// Each channel is created with some amount of backing buffer, and sends will /// *block* until buffer space becomes available. A buffer size of 0 is valid, /// which means that every successful send is paired with a successful recv. /// /// This flavor of channels defines a new `send_opt` method for channels which /// is the method by which a message is sent but the task does not panic if it /// cannot be delivered. /// /// Another major difference is that send() will *always* return back the data /// if it couldn't be sent. This is because it is deterministically known when /// the data is received and when it is not received. /// /// Implementation-wise, it can all be summed up with "use a mutex plus some /// logic". The mutex used here is an OS native mutex, meaning that no user code /// is run inside of the mutex (to prevent context switching). This /// implementation shares almost all code for the buffered and unbuffered cases /// of a synchronous channel. There are a few branches for the unbuffered case, /// but they're mostly just relevant to blocking senders. use core::prelude::*; pub use self::Failure::*; use self::Blocker::*; use vec::Vec; use core::mem; use core::ptr; use sync::atomic::{Ordering, AtomicUsize}; use sync::mpsc::blocking::{self, WaitToken, SignalToken}; use sync::mpsc::select::StartResult::{self, Installed, Abort}; use sync::{Mutex, MutexGuard}; pub struct Packet<T> { /// Only field outside of the mutex. Just done for kicks, but mainly because /// the other shared channel already had the code implemented channels: AtomicUsize, lock: Mutex<State<T>>, } unsafe impl<T: Send> Send for Packet<T> { } unsafe impl<T: Send> Sync for Packet<T> { } struct State<T> { disconnected: bool, // Is the channel disconnected yet? queue: Queue, // queue of senders waiting to send data blocker: Blocker, // currently blocked task on this channel buf: Buffer<T>, // storage for buffered messages cap: usize, // capacity of this channel /// A curious flag used to indicate whether a sender failed or succeeded in /// blocking. This is used to transmit information back to the task that it /// must dequeue its message from the buffer because it was not received. /// This is only relevant in the 0-buffer case. This obviously cannot be /// safely constructed, but it's guaranteed to always have a valid pointer /// value. canceled: Option<&'static mut bool>, } unsafe impl<T: Send> Send for State<T> {} /// Possible flavors of threads who can be blocked on this channel. enum Blocker { BlockedSender(SignalToken), BlockedReceiver(SignalToken), NoneBlocked } /// Simple queue for threading tasks together. 
Nodes are stack-allocated, so /// this structure is not safe at all struct Queue { head: *mut Node, tail: *mut Node, } struct Node { token: Option<SignalToken>, next: *mut Node, } unsafe impl Send for Node {} /// A simple ring-buffer struct Buffer<T> { buf: Vec<Option<T>>, start: usize, size: usize, } #[derive(Debug)]<|fim▁hole|> Empty, Disconnected, } /// Atomically blocks the current thread, placing it into `slot`, unlocking `lock` /// in the meantime. This re-locks the mutex upon returning. fn wait<'a, 'b, T>(lock: &'a Mutex<State<T>>, mut guard: MutexGuard<'b, State<T>>, f: fn(SignalToken) -> Blocker) -> MutexGuard<'a, State<T>> { let (wait_token, signal_token) = blocking::tokens(); match mem::replace(&mut guard.blocker, f(signal_token)) { NoneBlocked => {} _ => unreachable!(), } drop(guard); // unlock wait_token.wait(); // block lock.lock().unwrap() // relock } /// Wakes up a thread, dropping the lock at the correct time fn wakeup<T>(token: SignalToken, guard: MutexGuard<State<T>>) { // We need to be careful to wake up the waiting task *outside* of the mutex // in case it incurs a context switch. drop(guard); token.signal(); } impl<T> Packet<T> { pub fn new(cap: usize) -> Packet<T> { Packet { channels: AtomicUsize::new(1), lock: Mutex::new(State { disconnected: false, blocker: NoneBlocked, cap: cap, canceled: None, queue: Queue { head: ptr::null_mut(), tail: ptr::null_mut(), }, buf: Buffer { buf: (0..cap + if cap == 0 {1} else {0}).map(|_| None).collect(), start: 0, size: 0, }, }), } } // wait until a send slot is available, returning locked access to // the channel state. fn acquire_send_slot(&self) -> MutexGuard<State<T>> { let mut node = Node { token: None, next: ptr::null_mut() }; loop { let mut guard = self.lock.lock().unwrap(); // are we ready to go? if guard.disconnected || guard.buf.size() < guard.buf.cap() { return guard; } // no room; actually block let wait_token = guard.queue.enqueue(&mut node); drop(guard); wait_token.wait(); } } pub fn send(&self, t: T) -> Result<(), T> { let mut guard = self.acquire_send_slot(); if guard.disconnected { return Err(t) } guard.buf.enqueue(t); match mem::replace(&mut guard.blocker, NoneBlocked) { // if our capacity is 0, then we need to wait for a receiver to be // available to take our data. After waiting, we check again to make // sure the port didn't go away in the meantime. If it did, we need // to hand back our data. NoneBlocked if guard.cap == 0 => { let mut canceled = false; assert!(guard.canceled.is_none()); guard.canceled = Some(unsafe { mem::transmute(&mut canceled) }); let mut guard = wait(&self.lock, guard, BlockedSender); if canceled {Err(guard.buf.dequeue())} else {Ok(())} } // success, we buffered some data NoneBlocked => Ok(()), // success, someone's about to receive our buffered data. BlockedReceiver(token) => { wakeup(token, guard); Ok(()) } BlockedSender(..) => panic!("lolwut"), } } pub fn try_send(&self, t: T) -> Result<(), super::TrySendError<T>> { let mut guard = self.lock.lock().unwrap(); if guard.disconnected { Err(super::TrySendError::Disconnected(t)) } else if guard.buf.size() == guard.buf.cap() { Err(super::TrySendError::Full(t)) } else if guard.cap == 0 { // With capacity 0, even though we have buffer space we can't // transfer the data unless there's a receiver waiting. match mem::replace(&mut guard.blocker, NoneBlocked) { NoneBlocked => Err(super::TrySendError::Full(t)), BlockedSender(..) 
=> unreachable!(), BlockedReceiver(token) => { guard.buf.enqueue(t); wakeup(token, guard); Ok(()) } } } else { // If the buffer has some space and the capacity isn't 0, then we // just enqueue the data for later retrieval, ensuring to wake up // any blocked receiver if there is one. assert!(guard.buf.size() < guard.buf.cap()); guard.buf.enqueue(t); match mem::replace(&mut guard.blocker, NoneBlocked) { BlockedReceiver(token) => wakeup(token, guard), NoneBlocked => {} BlockedSender(..) => unreachable!(), } Ok(()) } } // Receives a message from this channel // // When reading this, remember that there can only ever be one receiver at // time. pub fn recv(&self) -> Result<T, ()> { let mut guard = self.lock.lock().unwrap(); // Wait for the buffer to have something in it. No need for a while loop // because we're the only receiver. let mut waited = false; if !guard.disconnected && guard.buf.size() == 0 { guard = wait(&self.lock, guard, BlockedReceiver); waited = true; } if guard.disconnected && guard.buf.size() == 0 { return Err(()) } // Pick up the data, wake up our neighbors, and carry on assert!(guard.buf.size() > 0); let ret = guard.buf.dequeue(); self.wakeup_senders(waited, guard); return Ok(ret); } pub fn try_recv(&self) -> Result<T, Failure> { let mut guard = self.lock.lock().unwrap(); // Easy cases first if guard.disconnected { return Err(Disconnected) } if guard.buf.size() == 0 { return Err(Empty) } // Be sure to wake up neighbors let ret = Ok(guard.buf.dequeue()); self.wakeup_senders(false, guard); return ret; } // Wake up pending senders after some data has been received // // * `waited` - flag if the receiver blocked to receive some data, or if it // just picked up some data on the way out // * `guard` - the lock guard that is held over this channel's lock fn wakeup_senders(&self, waited: bool, mut guard: MutexGuard<State<T>>) { let pending_sender1: Option<SignalToken> = guard.queue.dequeue(); // If this is a no-buffer channel (cap == 0), then if we didn't wait we // need to ACK the sender. If we waited, then the sender waking us up // was already the ACK. let pending_sender2 = if guard.cap == 0 && !waited { match mem::replace(&mut guard.blocker, NoneBlocked) { NoneBlocked => None, BlockedReceiver(..) => unreachable!(), BlockedSender(token) => { guard.canceled.take(); Some(token) } } } else { None }; mem::drop(guard); // only outside of the lock do we wake up the pending tasks pending_sender1.map(|t| t.signal()); pending_sender2.map(|t| t.signal()); } // Prepares this shared packet for a channel clone, essentially just bumping // a refcount. pub fn clone_chan(&self) { self.channels.fetch_add(1, Ordering::SeqCst); } pub fn drop_chan(&self) { // Only flag the channel as disconnected if we're the last channel match self.channels.fetch_sub(1, Ordering::SeqCst) { 1 => {} _ => return } // Not much to do other than wake up a receiver if one's there let mut guard = self.lock.lock().unwrap(); if guard.disconnected { return } guard.disconnected = true; match mem::replace(&mut guard.blocker, NoneBlocked) { NoneBlocked => {} BlockedSender(..) => unreachable!(), BlockedReceiver(token) => wakeup(token, guard), } } pub fn drop_port(&self) { let mut guard = self.lock.lock().unwrap(); if guard.disconnected { return } guard.disconnected = true; // If the capacity is 0, then the sender may want its data back after // we're disconnected. Otherwise it's now our responsibility to destroy // the buffered data. 
As with many other portions of this code, this // needs to be careful to destroy the data *outside* of the lock to // prevent deadlock. let _data = if guard.cap != 0 { mem::replace(&mut guard.buf.buf, Vec::new()) } else { Vec::new() }; let mut queue = mem::replace(&mut guard.queue, Queue { head: ptr::null_mut(), tail: ptr::null_mut(), }); let waiter = match mem::replace(&mut guard.blocker, NoneBlocked) { NoneBlocked => None, BlockedSender(token) => { *guard.canceled.take().unwrap() = true; Some(token) } BlockedReceiver(..) => unreachable!(), }; mem::drop(guard); loop { match queue.dequeue() { Some(token) => { token.signal(); } None => break, } } waiter.map(|t| t.signal()); } //////////////////////////////////////////////////////////////////////////// // select implementation //////////////////////////////////////////////////////////////////////////// // If Ok, the value is whether this port has data, if Err, then the upgraded // port needs to be checked instead of this one. pub fn can_recv(&self) -> bool { let guard = self.lock.lock().unwrap(); guard.disconnected || guard.buf.size() > 0 } // Attempts to start selection on this port. This can either succeed or fail // because there is data waiting. pub fn start_selection(&self, token: SignalToken) -> StartResult { let mut guard = self.lock.lock().unwrap(); if guard.disconnected || guard.buf.size() > 0 { Abort } else { match mem::replace(&mut guard.blocker, BlockedReceiver(token)) { NoneBlocked => {} BlockedSender(..) => unreachable!(), BlockedReceiver(..) => unreachable!(), } Installed } } // Remove a previous selecting task from this port. This ensures that the // blocked task will no longer be visible to any other threads. // // The return value indicates whether there's data on this port. pub fn abort_selection(&self) -> bool { let mut guard = self.lock.lock().unwrap(); match mem::replace(&mut guard.blocker, NoneBlocked) { NoneBlocked => true, BlockedSender(token) => { guard.blocker = BlockedSender(token); true } BlockedReceiver(token) => { drop(token); false } } } } #[unsafe_destructor] impl<T> Drop for Packet<T> { fn drop(&mut self) { assert_eq!(self.channels.load(Ordering::SeqCst), 0); let mut guard = self.lock.lock().unwrap(); assert!(guard.queue.dequeue().is_none()); assert!(guard.canceled.is_none()); } } //////////////////////////////////////////////////////////////////////////////// // Buffer, a simple ring buffer backed by Vec<T> //////////////////////////////////////////////////////////////////////////////// impl<T> Buffer<T> { fn enqueue(&mut self, t: T) { let pos = (self.start + self.size) % self.buf.len(); self.size += 1; let prev = mem::replace(&mut self.buf[pos], Some(t)); assert!(prev.is_none()); } fn dequeue(&mut self) -> T { let start = self.start; self.size -= 1; self.start = (self.start + 1) % self.buf.len(); let result = &mut self.buf[start]; result.take().unwrap() } fn size(&self) -> usize { self.size } fn cap(&self) -> usize { self.buf.len() } } //////////////////////////////////////////////////////////////////////////////// // Queue, a simple queue to enqueue tasks with (stack-allocated nodes) //////////////////////////////////////////////////////////////////////////////// impl Queue { fn enqueue(&mut self, node: &mut Node) -> WaitToken { let (wait_token, signal_token) = blocking::tokens(); node.token = Some(signal_token); node.next = ptr::null_mut(); if self.tail.is_null() { self.head = node as *mut Node; self.tail = node as *mut Node; } else { unsafe { (*self.tail).next = node as *mut Node; self.tail = node as 
*mut Node; } } wait_token } fn dequeue(&mut self) -> Option<SignalToken> { if self.head.is_null() { return None } let node = self.head; self.head = unsafe { (*node).next }; if self.head.is_null() { self.tail = ptr::null_mut(); } unsafe { (*node).next = ptr::null_mut(); Some((*node).token.take().unwrap()) } } }<|fim▁end|>
pub enum Failure {
<|file_name|>api.js<|end_file_name|><|fim▁begin|>YUI.add("yuidoc-meta", function(Y) {<|fim▁hole|> ], "modules": [ "gallery-audio" ], "allModules": [ { "displayName": "gallery-audio", "name": "gallery-audio" } ] } }; });<|fim▁end|>
Y.YUIDoc = { meta: { "classes": [ "Audio"
<|file_name|>class.IContextMenuable.js<|end_file_name|><|fim▁begin|>/* * Copyright 2007-2013 Charles du Jeu - Abstrium SAS <team (at) pyd.io> * This file is part of Pydio. * * Pydio is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Pydio is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with Pydio. If not, see <http://www.gnu.org/licenses/>. * * The latest code can be found at <https://pydio.com>.<|fim▁hole|> * * Description : focusable component / Tab navigation */ Interface.create("IContextMenuable", { setContextualMenu : function(contextMenu){} });<|fim▁end|>
<|file_name|>logdna.py<|end_file_name|><|fim▁begin|>import logging import requests import socket import sys import threading import time from concurrent.futures import ThreadPoolExecutor from .configs import defaults from .utils import sanitize_meta, get_ip, normalize_list_option class LogDNAHandler(logging.Handler): def __init__(self, key, options={}): # Setup Handler logging.Handler.__init__(self) # Set Internal Logger self.internal_handler = logging.StreamHandler(sys.stdout) self.internal_handler.setLevel(logging.DEBUG) self.internalLogger = logging.getLogger('internal') self.internalLogger.addHandler(self.internal_handler) self.internalLogger.setLevel(logging.DEBUG) # Set the Custom Variables<|fim▁hole|> self.mac = options.get('mac', None) self.loglevel = options.get('level', 'info') self.app = options.get('app', '') self.env = options.get('env', '') self.tags = normalize_list_option(options, 'tags') self.custom_fields = normalize_list_option(options, 'custom_fields') self.custom_fields += defaults['META_FIELDS'] # Set the Connection Variables self.url = options.get('url', defaults['LOGDNA_URL']) self.request_timeout = options.get('request_timeout', defaults['DEFAULT_REQUEST_TIMEOUT']) self.user_agent = options.get('user_agent', defaults['USER_AGENT']) self.max_retry_attempts = options.get('max_retry_attempts', defaults['MAX_RETRY_ATTEMPTS']) self.max_retry_jitter = options.get('max_retry_jitter', defaults['MAX_RETRY_JITTER']) self.max_concurrent_requests = options.get( 'max_concurrent_requests', defaults['MAX_CONCURRENT_REQUESTS']) self.retry_interval_secs = options.get('retry_interval_secs', defaults['RETRY_INTERVAL_SECS']) # Set the Flush-related Variables self.buf = [] self.buf_size = 0 self.secondary = [] self.exception_flag = False self.flusher = None self.include_standard_meta = options.get('include_standard_meta', None) if self.include_standard_meta is not None: self.internalLogger.debug( '"include_standard_meta" option will be deprecated ' + 'removed in the upcoming major release') self.index_meta = options.get('index_meta', False) self.flush_limit = options.get('flush_limit', defaults['FLUSH_LIMIT']) self.flush_interval_secs = options.get('flush_interval', defaults['FLUSH_INTERVAL_SECS']) self.buf_retention_limit = options.get('buf_retention_limit', defaults['BUF_RETENTION_LIMIT']) # Set up the Thread Pools self.worker_thread_pool = ThreadPoolExecutor() self.request_thread_pool = ThreadPoolExecutor( max_workers=self.max_concurrent_requests) self.setLevel(logging.DEBUG) self.lock = threading.RLock() def start_flusher(self): if not self.flusher: self.flusher = threading.Timer(self.flush_interval_secs, self.flush) self.flusher.start() def close_flusher(self): if self.flusher: self.flusher.cancel() self.flusher = None def buffer_log(self, message): if self.worker_thread_pool: try: self.worker_thread_pool.submit(self.buffer_log_sync, message) except RuntimeError: self.buffer_log_sync(message) except Exception as e: self.internalLogger.debug('Error in calling buffer_log: %s', e) def buffer_log_sync(self, message): # Attempt to acquire lock to write to buf # otherwise write to secondary as flush occurs if self.lock.acquire(blocking=False): msglen = len(message['line']) if self.buf_size + msglen < self.buf_retention_limit: self.buf.append(message) self.buf_size += msglen else: self.internalLogger.debug( 'The buffer size exceeded the limit: %s', self.buf_retention_limit) if self.buf_size >= self.flush_limit and not self.exception_flag: self.close_flusher() self.flush() else: 
self.start_flusher() self.lock.release() else: self.secondary.append(message) def clean_after_success(self): self.close_flusher() self.buf.clear() self.buf_size = 0 self.exception_flag = False def flush(self): if self.worker_thread_pool: try: self.worker_thread_pool.submit(self.flush_sync) except RuntimeError: self.flush_sync() except Exception as e: self.internalLogger.debug('Error in calling flush: %s', e) def flush_sync(self): if self.buf_size == 0 and len(self.secondary) == 0: return if self.lock.acquire(blocking=False): if self.request_thread_pool: try: self.request_thread_pool.submit(self.try_request) except RuntimeError: self.try_request() except Exception as e: self.internalLogger.debug( 'Error in calling try_request: %s', e) finally: self.lock.release() else: self.lock.release() else: self.close_flusher() self.start_flusher() def try_request(self): self.buf.extend(self.secondary) self.secondary = [] data = {'e': 'ls', 'ls': self.buf} retries = 0 while retries < self.max_retry_attempts: retries += 1 if self.send_request(data): self.clean_after_success() break sleep_time = self.retry_interval_secs * (1 << (retries - 1)) sleep_time += self.max_retry_jitter time.sleep(sleep_time) if retries >= self.max_retry_attempts: self.internalLogger.debug( 'Flush exceeded %s tries. Discarding flush buffer', self.max_retry_attempts) self.close_flusher() self.exception_flag = True def send_request(self, data): try: response = requests.post(url=self.url, json=data, auth=('user', self.key), params={ 'hostname': self.hostname, 'ip': self.ip, 'mac': self.mac, 'tags': self.tags, 'now': int(time.time() * 1000) }, stream=True, timeout=self.request_timeout, headers={'user-agent': self.user_agent}) response.raise_for_status() status_code = response.status_code if status_code in [401, 403]: self.internalLogger.debug( 'Please provide a valid ingestion key.' + ' Discarding flush buffer') return True if status_code == 200: return True if status_code in [400, 500, 504]: self.internalLogger.debug('The request failed %s. Retrying...', response.reason) return True else: self.internalLogger.debug( 'The request failed: %s. Retrying...', response.reason) except requests.exceptions.Timeout as timeout: self.internalLogger.debug('Timeout error occurred %s. Retrying...', timeout) except requests.exceptions.RequestException as exception: self.internalLogger.debug( 'Error sending logs %s. Discarding flush buffer', exception) return True return False def emit(self, record): msg = self.format(record) record = record.__dict__ message = { 'hostname': self.hostname, 'timestamp': int(time.time() * 1000), 'line': msg, 'level': record['levelname'] or self.loglevel, 'app': self.app or record['module'], 'env': self.env } message['meta'] = {} for key in self.custom_fields: if key in record: if isinstance(record[key], tuple): message['meta'][key] = list(record[key]) elif record[key] is not None: message['meta'][key] = record[key] message['meta'] = sanitize_meta(message['meta'], self.index_meta) opts = {} if 'args' in record and not isinstance(record['args'], tuple): opts = record['args'] for key in ['app', 'env', 'hostname', 'level', 'timestamp']: if key in opts: message[key] = opts[key] self.buffer_log(message) def close(self): self.close_flusher() self.flush_sync() if self.worker_thread_pool: self.worker_thread_pool.shutdown(wait=True) self.worker_thread_pool = None if self.request_thread_pool: self.request_thread_pool.shutdown(wait=True) self.request_thread_pool = None logging.Handler.close(self)<|fim▁end|>
self.key = key self.hostname = options.get('hostname', socket.gethostname()) self.ip = options.get('ip', get_ip())
<|file_name|>81.Search_in_Rotated_Sorted_Array_II.cpp<|end_file_name|><|fim▁begin|>class Solution { public: bool search(vector<int>& nums, int target) { int l = 0, r = nums.size() - 1; if(nums.empty()) return false; while(l != r && nums[l] == nums[r]) l++; while(l < r) { int mid = (l + r) >> 1; if(nums[mid] == target) return true; else if(nums[mid] > nums[r]) l = mid + 1; else r = mid; } if(l != 0 && target <= nums[l - 1] && target >= nums[0]) { r = l - 1; l = 0; } else { r = nums.size() - 1; } while(l < r) { int mid = (l + r) >> 1; if(nums[mid] == target) return true; else if(nums[mid] > target) r = mid; else l = mid + 1; } if(nums[l] == target) return true;<|fim▁hole|> return false; } };<|fim▁end|>
<|file_name|>project-requirements-change.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python # Copyright (C) 2011 OpenStack, LLC. # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import argparse import collections import contextlib import os import shlex import shutil import subprocess import sys import tempfile requirement = None project = None def run_command(cmd): print(cmd) cmd_list = shlex.split(str(cmd)) p = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = p.communicate() if p.returncode != 0: raise SystemError(err) return (out.strip(), err.strip()) class RequirementsList(object): def __init__(self, name, project): self.name = name self.reqs_by_file = {} self.project = project self.failed = False @property def reqs(self): return {k: v for d in self.reqs_by_file.values() for k, v in d.items()} def extract_reqs(self, content): reqs = collections.defaultdict(set) parsed = requirement.parse(content) for name, entries in parsed.items(): if not name: # Comments and other unprocessed lines continue list_reqs = [r for (r, line) in entries] # Strip the comments out before checking if there are duplicates list_reqs_stripped = [r._replace(comment='') for r in list_reqs] if len(list_reqs_stripped) != len(set(list_reqs_stripped)): print("Requirements file has duplicate entries " "for package %s : %r." % (name, list_reqs)) self.failed = True reqs[name].update(list_reqs) return reqs def process(self, strict=True): """Convert the project into ready to use data. - an iterable of requirement sets to check - each set has the following rules: - each has a list of Requirements objects - duplicates are not permitted within that list """ print("Checking %(name)s" % {'name': self.name}) # First, parse. for fname, content in self.project.get('requirements', {}).items(): print("Processing %(fname)s" % {'fname': fname}) if strict and not content.endswith('\n'): print("Requirements file %s does not " "end with a newline." 
% fname) self.reqs_by_file[fname] = self.extract_reqs(content) for name, content in project.extras(self.project).items(): print("Processing .[%(extra)s]" % {'extra': name}) self.reqs_by_file[name] = self.extract_reqs(content) def grab_args(): """Grab and return arguments""" parser = argparse.ArgumentParser( description="Check if project requirements have changed" ) parser.add_argument('--local', action='store_true', help='check local changes (not yet in git)') parser.add_argument('branch', nargs='?', default='master', help='target branch for diffs') parser.add_argument('--zc', help='what zuul cloner to call') parser.add_argument('--reqs', help='use a specified requirements tree') return parser.parse_args() @contextlib.contextmanager def tempdir(): try: reqroot = tempfile.mkdtemp() yield reqroot finally: shutil.rmtree(reqroot) def install_and_load_requirements(reqroot, reqdir): sha = run_command("git --git-dir %s/.git rev-parse HEAD" % reqdir)[0] print "requirements git sha: %s" % sha req_venv = os.path.join(reqroot, 'venv') req_pip = os.path.join(req_venv, 'bin/pip') req_lib = os.path.join(req_venv, 'lib/python2.7/site-packages') out, err = run_command("virtualenv " + req_venv) out, err = run_command(req_pip + " install " + reqdir) sys.path.append(req_lib) global project global requirement from openstack_requirements import project # noqa from openstack_requirements import requirement # noqa def _is_requirement_in_global_reqs(req, global_reqs): # Compare all fields except the extras field as the global # requirements should not have any lines with the extras syntax # example: oslo.db[xyz]<1.2.3 for req2 in global_reqs: if (req.package == req2.package and req.location == req2.location and req.specifiers == req2.specifiers and req.markers == req2.markers and req.comment == req2.comment): return True return False def main(): args = grab_args() branch = args.branch failed = False # build a list of requirements from the global list in the # openstack/requirements project so we can match them to the changes with tempdir() as reqroot: # Only clone requirements repo if no local repo is specified # on the command line. if args.reqs is None: reqdir = os.path.join(reqroot, "openstack/requirements") if args.zc is not None: zc = args.zc else: zc = '/usr/zuul-env/bin/zuul-cloner' out, err = run_command("%(zc)s " "--cache-dir /opt/git " "--workspace %(root)s " "git://git.openstack.org " "openstack/requirements" % dict(zc=zc, root=reqroot)) print out print err else: reqdir = args.reqs install_and_load_requirements(reqroot, reqdir) global_reqs = requirement.parse( open(reqdir + '/global-requirements.txt', 'rt').read()) for k, entries in global_reqs.items(): # Discard the lines: we don't need them. global_reqs[k] = set(r for (r, line) in entries) cwd = os.getcwd() # build a list of requirements in the proposed change, # and check them for style violations while doing so head = run_command("git rev-parse HEAD")[0] head_proj = project.read(cwd) head_reqs = RequirementsList('HEAD', head_proj) # Don't apply strict parsing rules to stable branches. # Reasoning is: # - devstack etc protect us from functional issues # - we're backporting to stable, so guarding against # aesthetics and DRY concerns is not our business anymore # - if in future we have other not-functional linty style # things to add, we don't want them to affect stable # either. 
head_strict = not branch.startswith('stable/') head_reqs.process(strict=head_strict) if not args.local: # build a list of requirements already in the target branch, # so that we can create a diff and identify what's being changed run_command("git remote update") run_command("git checkout remotes/origin/%s" % branch) branch_proj = project.read(cwd) # switch back to the proposed change now run_command("git checkout %s" % head) else: branch_proj = {'root': cwd} branch_reqs = RequirementsList(branch, branch_proj) # Don't error on the target branch being broken. branch_reqs.process(strict=False) # iterate through the changing entries and see if they match the global # equivalents we want enforced for fname, freqs in head_reqs.reqs_by_file.items(): print("Validating %(fname)s" % {'fname': fname}) for name, reqs in freqs.items(): counts = {} if (name in branch_reqs.reqs and reqs == branch_reqs.reqs[name]): # Unchanged [or a change that preserves a current value] continue if name not in global_reqs: failed = True print("Requirement %s not in openstack/requirements" % str(reqs)) continue if reqs == global_reqs[name]: continue for req in reqs: if req.extras: for extra in req.extras: counts[extra] = counts.get(extra, 0) + 1 else: counts[''] = counts.get('', 0) + 1<|fim▁hole|> failed = True print("Requirement for package %s : %s does " "not match openstack/requirements value : %s" % ( name, str(req), str(global_reqs[name]))) for extra, count in counts.items(): if count != len(global_reqs[name]): failed = True print("Package %s%s requirement does not match " "number of lines (%d) in " "openstack/requirements" % ( name, ('[%s]' % extra) if extra else '', len(global_reqs[name]))) # report the results if failed or head_reqs.failed or branch_reqs.failed: sys.exit(1) print("Updated requirements match openstack/requirements.") if __name__ == '__main__': main()<|fim▁end|>
if not _is_requirement_in_global_reqs( req, global_reqs[name]):
<|file_name|>index.js<|end_file_name|><|fim▁begin|>function HtmlElementsPlugin(locations) { this.locations = locations; } HtmlElementsPlugin.prototype.apply = function(compiler) { var self = this; compiler.plugin('compilation', function(compilation) { compilation.options.htmlElements = compilation.options.htmlElements || {}; compilation.plugin('html-webpack-plugin-before-html-generation', function(htmlPluginData, callback) { const locations = self.locations; if (locations) { const publicPath = htmlPluginData.assets.publicPath; Object.getOwnPropertyNames(locations).forEach(function(loc) { compilation.options.htmlElements[loc] = getHtmlElementString(locations[loc], publicPath); }); }<|fim▁hole|> callback(null, htmlPluginData); }); }); }; const RE_ENDS_WITH_BS = /\/$/; /** * Create an HTML tag with attributes from a map. * * Example: * createTag('link', { rel: "manifest", href: "/assets/manifest.json" }) * // <link rel="manifest" href="/assets/manifest.json"> * @param tagName The name of the tag * @param attrMap A Map of attribute names (keys) and their values. * @param publicPath a path to add to eh start of static asset url * @returns {string} */ function createTag(tagName, attrMap, publicPath) { publicPath = publicPath || ''; // add trailing slash if we have a publicPath and it doesn't have one. if (publicPath && !RE_ENDS_WITH_BS.test(publicPath)) { publicPath += '/'; } const attributes = Object.getOwnPropertyNames(attrMap) .filter(function(name) { return name[0] !== '='; } ) .map(function(name) { var value = attrMap[name]; if (publicPath) { // check if we have explicit instruction, use it if so (e.g: =herf: false) // if no instruction, use public path if it's href attribute. const usePublicPath = attrMap.hasOwnProperty('=' + name) ? !!attrMap['=' + name] : name === 'href'; if (usePublicPath) { // remove a starting trailing slash if the value has one so we wont have // value = publicPath + (value[0] === '/' ? value.substr(1) : value); } } return name + '="' + value + '"'; }); return '<' + tagName + ' ' + attributes.join(' ') + '>'; } /** * Returns a string representing all html elements defined in a data source. * * Example: * * const ds = { * link: [ * { rel: "apple-touch-icon", sizes: "57x57", href: "/assets/icon/apple-icon-57x57.png" } * ], * meta: [ * { name: "msapplication-TileColor", content: "#00bcd4" } * ] * } * * getHeadTags(ds); * // "<link rel="apple-touch-icon" sizes="57x57" href="/assets/icon/apple-icon-57x57.png">" * "<meta name="msapplication-TileColor" content="#00bcd4">" * * @returns {string} */ function getHtmlElementString(dataSource, publicPath) { return Object.getOwnPropertyNames(dataSource) .map(function(name) { if (Array.isArray(dataSource[name])) { return dataSource[name].map(function(attrs) { return createTag(name, attrs, publicPath); } ); } else { return [ createTag(name, dataSource[name], publicPath) ]; } }) .reduce(function(arr, curr) { return arr.concat(curr); }, []) .join('\n\t'); } module.exports = HtmlElementsPlugin;<|fim▁end|>
<|file_name|>control-group-success.js<|end_file_name|><|fim▁begin|>import { inject as service } from '@ember/service'; import Component from '@ember/component'; import { task } from 'ember-concurrency'; export default Component.extend({ router: service(), controlGroup: service(), store: service(), // public attrs model: null, controlGroupResponse: null, //internal state error: null, unwrapData: null, unwrap: task(function* (token) { let adapter = this.store.adapterFor('tools'); this.set('error', null); try { let response = yield adapter.toolAction('unwrap', null, { clientToken: token }); this.set('unwrapData', response.auth || response.data); this.controlGroup.deleteControlGroupToken(this.model.id); } catch (e) { this.set('error', `Token unwrap failed: ${e.errors[0]}`); } }).drop(), markAndNavigate: task(function* () { this.controlGroup.markTokenForUnwrap(this.model.id); let { url } = this.controlGroupResponse.uiParams; yield this.router.transitionTo(url); }).drop(),<|fim▁hole|><|fim▁end|>
});
<|file_name|>read_only_memory.rs<|end_file_name|><|fim▁begin|>// Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #![cfg(any(target_arch = "x86", target_arch = "x86_64"))] use base::{MemoryMappingBuilder, SharedMemory}; use kvm::*; use kvm_sys::kvm_regs; use vm_memory::{GuestAddress, GuestMemory}; #[test] fn test_run() { /* 0000 268A07 mov al,[es:bx] 0003 0401 add al,0x1<|fim▁hole|> 0005 268807 mov [es:bx],al 0008 F4 hlt */ let code = [0x26, 0x8a, 0x07, 0x04, 0x01, 0x26, 0x88, 0x07, 0xf4]; let mem_size = 0x2000; let load_addr = GuestAddress(0x1000); let guest_mem = GuestMemory::new(&[]).unwrap(); let mem = SharedMemory::anon(mem_size).expect("failed to create shared memory"); let mmap = MemoryMappingBuilder::new(mem_size as usize) .from_shared_memory(&mem) .build() .expect("failed to create memory mapping"); mmap.write_slice(&code[..], load_addr.offset() as usize) .expect("Writing code to memory failed."); let kvm = Kvm::new().expect("new kvm failed"); let mut vm = Vm::new(&kvm, guest_mem).expect("new vm failed"); let vcpu = Vcpu::new(0, &kvm, &vm).expect("new vcpu failed"); let mut vcpu_sregs = vcpu.get_sregs().expect("get sregs failed"); vcpu_sregs.cs.base = 0; vcpu_sregs.cs.selector = 0; vcpu_sregs.es.base = 0x3000; vcpu_sregs.es.selector = 0; vcpu.set_sregs(&vcpu_sregs).expect("set sregs failed"); let mut vcpu_regs: kvm_regs = unsafe { std::mem::zeroed() }; vcpu_regs.rip = load_addr.offset() as u64; vcpu_regs.rflags = 2; vcpu_regs.rax = 0x66; vcpu_regs.rbx = 0; vcpu.set_regs(&vcpu_regs).expect("set regs failed"); vm.add_memory_region( GuestAddress(0), Box::new( MemoryMappingBuilder::new(mem_size as usize) .from_shared_memory(&mem) .build() .expect("failed to create memory mapping"), ), false, false, ) .expect("failed to register memory"); // Give some read only memory for the test code to read from and force a vcpu exit when it reads // from it. let mem_ro = SharedMemory::anon(0x1000).expect("failed to create shared memory"); let mmap_ro = MemoryMappingBuilder::new(0x1000) .from_shared_memory(&mem_ro) .build() .expect("failed to create memory mapping"); mmap_ro .write_obj(vcpu_regs.rax as u8, 0) .expect("failed writing data to ro memory"); vm.add_memory_region( GuestAddress(vcpu_sregs.es.base), Box::new( MemoryMappingBuilder::new(0x1000) .from_shared_memory(&mem_ro) .build() .expect("failed to create memory mapping"), ), true, false, ) .expect("failed to register memory"); // Ensure we get exactly 1 exit from attempting to write to read only memory. let mut exits = 0; let runnable_vcpu = vcpu.to_runnable(None).unwrap(); loop { match runnable_vcpu.run().expect("run failed") { VcpuExit::Hlt => break, VcpuExit::MmioWrite { address, size: 1, data, } => { assert_eq!(address, vcpu_sregs.es.base); assert_eq!(data[0] as u64, vcpu_regs.rax + 1); exits += 1; } r => panic!("unexpected exit reason: {:?}", r), } } // Check that exactly 1 attempt to write to read only memory was made, and that the memory is // unchanged after that attempt. assert_eq!(exits, 1); assert_eq!( mmap_ro .read_obj::<u8>(0) .expect("failed to read data from ro memory"), vcpu_regs.rax as u8 ); }<|fim▁end|>
<|file_name|>bitcoin_zh.ts<|end_file_name|><|fim▁begin|><TS language="zh" version="2.1"> <context> <name>AddressBookPage</name> <message> <source>Right-click to edit address or label</source> <translation>右键单击来编辑地址或者标签</translation> </message> <message> <source>Create a new address</source> <translation>创建一个新地址</translation> </message> <message> <source>&amp;New</source> <translation>&amp;新建</translation> </message> <message> <source>Copy the currently selected address to the system clipboard</source> <translation>复制当前已选地址到系统剪切板</translation> </message> <message> <source>&amp;Copy</source> <translation>&amp;复制</translation> </message> <message> <source>C&amp;lose</source> <translation>关&amp;闭</translation> </message> <message> <source>Delete the currently selected address from the list</source> <translation>从列表中删除当前已选地址</translation> </message> <message> <source>Enter address or label to search</source> <translation>输入要搜索的地址或标签</translation> </message> <message> <source>Export the data in the current tab to a file</source> <translation>将当前选项卡中的数据导出到文件</translation> </message> <message> <source>&amp;Export</source> <translation>&amp;导出</translation> </message> <message> <source>&amp;Delete</source> <translation>&amp;删除</translation> </message> <message> <source>Choose the address to send coins to</source> <translation>选择想要发送币的地址</translation> </message> <message> <source>Choose the address to receive coins with</source> <translation>选择接收币的地址</translation> </message><|fim▁hole|> <source>C&amp;hoose</source> <translation>选&amp;择</translation> </message> <message> <source>Sending addresses</source> <translation>发送地址</translation> </message> <message> <source>Receiving addresses</source> <translation>接收地址</translation> </message> <message> <source>These are your Bitcoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source> <translation>这些是你的比特币支付地址。在发送之前,一定要核对金额和接收地址。</translation> </message> <message> <source>&amp;Copy Address</source> <translation>&amp;复制地址</translation> </message> <message> <source>Copy &amp;Label</source> <translation>复制 &amp;标记</translation> </message> <message> <source>&amp;Edit</source> <translation>&amp;编辑</translation> </message> <message> <source>Export Address List</source> <translation>导出地址列表</translation> </message> <message> <source>Comma separated file (*.csv)</source> <translation>逗号分隔的文件 (*.csv)</translation> </message> <message> <source>Exporting Failed</source> <translation>导出失败</translation> </message> <message> <source>There was an error trying to save the address list to %1. 
Please try again.</source> <translation>试图将地址列表保存为%1时出错。请再试一次。</translation> </message> </context> <context> <name>AddressTableModel</name> <message> <source>Label</source> <translation>标签</translation> </message> <message> <source>Address</source> <translation>地址</translation> </message> <message> <source>(no label)</source> <translation>(没有标签)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <source>Passphrase Dialog</source> <translation>密码对话框</translation> </message> <message> <source>Enter passphrase</source> <translation>输入密码</translation> </message> <message> <source>New passphrase</source> <translation>新密码</translation> </message> <message> <source>Repeat new passphrase</source> <translation>重复新密码</translation> </message> <message> <source>Encrypt wallet</source> <translation>加密钱包</translation> </message> <message> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation>此操作需要您的钱包密码来解锁钱包</translation> </message> <message> <source>Unlock wallet</source> <translation>解锁钱包</translation> </message> <message> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation>此操作需要您的钱包密码来解密钱包。</translation> </message> <message> <source>Decrypt wallet</source> <translation>解密钱包</translation> </message> <message> <source>Change passphrase</source> <translation>修改密码</translation> </message> <message> <source>Confirm wallet encryption</source> <translation>确认钱包密码</translation> </message> <message> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR BITCOINS&lt;/b&gt;!</source> <translation>注意:如果你加密了钱包,丢失了密码,您将&lt;b&gt;丢失所有的比特币。</translation> </message> <message> <source>Are you sure you wish to encrypt your wallet?</source> <translation>确定要加密您的钱包吗?</translation> </message> <message> <source>Wallet encrypted</source> <translation>加密钱包</translation> </message> <message> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation>重要提示:您以前对钱包文件所做的任何备份都应该替换为新的加密钱包文件。出于安全原因,一旦您开始使用新的加密钱包,以前未加密钱包文件备份将变得无用。</translation> </message> <message> <source>Wallet encryption failed</source> <translation>钱包加密失败</translation> </message> <message> <source>Wallet encryption failed due to an internal error. 
Your wallet was not encrypted.</source> <translation>由于内部错误,钱包加密失败。您的钱包没有加密。</translation> </message> <message> <source>The supplied passphrases do not match.</source> <translation>提供的密码不匹配。</translation> </message> <message> <source>Wallet unlock failed</source> <translation>钱包解锁失败</translation> </message> <message> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation>输入的钱包密码不正确。</translation> </message> <message> <source>Wallet decryption failed</source> <translation>钱包解密失败</translation> </message> <message> <source>Wallet passphrase was successfully changed.</source> <translation>钱包密码更改成功。</translation> </message> <message> <source>Warning: The Caps Lock key is on!</source> <translation>注意:大写锁定键打开了!</translation> </message> </context> <context> <name>BanTableModel</name> <message> <source>IP/Netmask</source> <translation>IP/子网掩码</translation> </message> <message> <source>Banned Until</source> <translation>禁止到</translation> </message> </context> <context> <name>BitcoinGUI</name> <message> <source>Sign &amp;message...</source> <translation>签名 &amp;消息...</translation> </message> <message> <source>Synchronizing with network...</source> <translation>与网络同步...</translation> </message> <message> <source>&amp;Overview</source> <translation>&amp;概述</translation> </message> <message> <source>Show general overview of wallet</source> <translation>显示钱包的一般概述</translation> </message> <message> <source>&amp;Transactions</source> <translation>&amp;交易</translation> </message> <message> <source>Browse transaction history</source> <translation>浏览交易历史</translation> </message> <message> <source>E&amp;xit</source> <translation>退&amp;出</translation> </message> <message> <source>Quit application</source> <translation>退出应用</translation> </message> <message> <source>&amp;About %1</source> <translation>&amp;关于 %1</translation> </message> <message> <source>Show information about %1</source> <translation>显示关于%1的信息</translation> </message> <message> <source>About &amp;Qt</source> <translation>关于 &amp;Qt</translation> </message> <message> <source>Show information about Qt</source> <translation>显示关于 Qt 的信息</translation> </message> <message> <source>&amp;Options...</source> <translation>&amp;选项</translation> </message> <message> <source>&amp;Encrypt Wallet...</source> <translation>&amp;加密钱包...</translation> </message> <message> <source>&amp;Backup Wallet...</source> <translation>&amp;备份钱包...</translation> </message> <message> <source>&amp;Change Passphrase...</source> <translation>&amp;修改密码...</translation> </message> <message> <source>Open &amp;URI...</source> <translation>打开 &amp;URI...</translation> </message> <message> <source>Wallet:</source> <translation>钱包:</translation> </message> <message> <source>Click to disable network activity.</source> <translation>单击禁用网络活动。</translation> </message> <message> <source>Network activity disabled.</source> <translation>禁用网络活动。</translation> </message> <message> <source>Click to enable network activity again.</source> <translation>单击再次启用网络活动。</translation> </message> <message> <source>Syncing Headers (%1%)...</source> <translation>正在同步Headers (%1%)...</translation> </message> <message> <source>Reindexing blocks on disk...</source> <translation>重新索引磁盘上的区块...</translation> </message> <message> <source>Proxy is &lt;b&gt;enabled&lt;/b&gt;: %1</source> <translation>启用代理:%1</translation> </message> <message> <source>Send coins to a Bitcoin address</source> <translation>发送比特币到一个比特币地址</translation> </message> <message> <source>Backup wallet to 
another location</source> <translation>备份钱包到另一个位置</translation> </message> <message> <source>Change the passphrase used for wallet encryption</source> <translation>更改钱包密码</translation> </message> <message> <source>&amp;Verify message...</source> <translation>&amp;验证消息...</translation> </message> <message> <source>&amp;Send</source> <translation>&amp;发送</translation> </message> <message> <source>&amp;Receive</source> <translation>&amp;接受</translation> </message> <message> <source>&amp;Show / Hide</source> <translation>&amp;显示 / 隐藏</translation> </message> <message> <source>Show or hide the main Window</source> <translation>显示或隐藏主窗口</translation> </message> <message> <source>Encrypt the private keys that belong to your wallet</source> <translation>加密您的钱包私钥</translation> </message> <message> <source>Sign messages with your Bitcoin addresses to prove you own them</source> <translation>用您的比特币地址签名信息,以证明拥有它们</translation> </message> <message> <source>Verify messages to ensure they were signed with specified Bitcoin addresses</source> <translation>验证消息,确保它们是用指定的比特币地址签名的</translation> </message> <message> <source>&amp;File</source> <translation>&amp;文件</translation> </message> <message> <source>&amp;Settings</source> <translation>&amp;设置</translation> </message> <message> <source>&amp;Help</source> <translation>&amp;帮助</translation> </message> <message> <source>Tabs toolbar</source> <translation>标签工具栏</translation> </message> <message> <source>Request payments (generates QR codes and bitcoin: URIs)</source> <translation>请求支付(生成二维码和比特币链接)</translation> </message> <message> <source>Show the list of used sending addresses and labels</source> <translation>显示使用过的发送地址或标签的列表</translation> </message> <message> <source>Show the list of used receiving addresses and labels</source> <translation>显示使用接收的地址或标签的列表</translation> </message> <message> <source>&amp;Command-line options</source> <translation>&amp;命令行选项</translation> </message> <message> <source>Indexing blocks on disk...</source> <translation>索引磁盘上的区块...</translation> </message> <message> <source>Processing blocks on disk...</source> <translation>处理磁盘上的区块...</translation> </message> <message> <source>%1 behind</source> <translation>%1 落后</translation> </message> <message> <source>Last received block was generated %1 ago.</source> <translation>上次接收到的块是在%1之前生成的。</translation> </message> <message> <source>Transactions after this will not yet be visible.</source> <translation>之后的交易还不可见。</translation> </message> <message> <source>Error</source> <translation>错误</translation> </message> <message> <source>Warning</source> <translation>警告</translation> </message> <message> <source>Information</source> <translation>消息</translation> </message> <message> <source>Up to date</source> <translation>最新的</translation> </message> <message> <source>&amp;Sending addresses</source> <translation>&amp;发送地址</translation> </message> <message> <source>&amp;Receiving addresses</source> <translation>&amp;接受地址</translation> </message> <message> <source>Open Wallet</source> <translation>打开钱包</translation> </message> <message> <source>Open a wallet</source> <translation>打开一个钱包</translation> </message> <message> <source>Close Wallet...</source> <translation>关闭钱包...</translation> </message> <message> <source>Close wallet</source> <translation>关闭钱包</translation> </message> <message> <source>Show the %1 help message to get a list with possible Bitcoin command-line options</source> <translation>显示%1帮助消息以获得可能包含Bitcoin命令行选项的列表</translation> </message> <message> <source>default 
wallet</source> <translation>默认钱包</translation> </message> <message> <source>No wallets available</source> <translation>无可用钱包</translation> </message> <message> <source>&amp;Window</source> <translation>&amp;窗口</translation> </message> <message> <source>Minimize</source> <translation>最小化</translation> </message> <message> <source>Zoom</source> <translation>缩放</translation> </message> <message> <source>Main Window</source> <translation>主窗口</translation> </message> <message> <source>%1 client</source> <translation>%1 客户端</translation> </message> <message> <source>Connecting to peers...</source> <translation>连接到节点...</translation> </message> <message> <source>Date: %1 </source> <translation>日期:%1 </translation> </message> <message> <source>Amount: %1 </source> <translation>总计:%1 </translation> </message> <message> <source>Wallet: %1 </source> <translation>钱包:%1 </translation> </message> <message> <source>Type: %1 </source> <translation>类型:%1 </translation> </message> <message> <source>Label: %1 </source> <translation>标签:%1 </translation> </message> <message> <source>Address: %1 </source> <translation>地址:%1 </translation> </message> <message> <source>Sent transaction</source> <translation>发送交易</translation> </message> <message> <source>Incoming transaction</source> <translation>入账交易</translation> </message> <message> <source>HD key generation is &lt;b&gt;enabled&lt;/b&gt;</source> <translation>HD密钥生成 &lt;b&gt;被允许&lt;/b&gt;</translation> </message> <message> <source>HD key generation is &lt;b&gt;disabled&lt;/b&gt;</source> <translation>HD密钥生成 &lt;b&gt;被禁止&lt;/b&gt;</translation> </message> <message> <source>Private key &lt;b&gt;disabled&lt;/b&gt;</source> <translation>私钥&lt;b&gt;被禁止&lt;/b&gt;</translation> </message> <message> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation>钱包是&lt;b&gt;加密的&lt;/b&gt;,目前&lt;b&gt;已解锁&lt;/b&gt;</translation> </message> <message> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>钱包是&lt;b&gt;加密的&lt;/b&gt;,目前&lt;b&gt;已锁定&lt;/b&gt;</translation> </message> <message> <source>A fatal error occurred. 
Bitcoin can no longer continue safely and will quit.</source> <translation>发生了致命错误。比特币无法继续安全运行,将退出。</translation> </message> </context> <context> <name>CoinControlDialog</name> <message> <source>Coin Selection</source> <translation>币种选择</translation> </message> <message> <source>Quantity:</source> <translation>数量:</translation> </message> <message> <source>Bytes:</source> <translation>字节:</translation> </message> <message> <source>Amount:</source> <translation>总计:</translation> </message> <message> <source>Fee:</source> <translation>手续费:</translation> </message> <message> <source>Dust:</source> <translation>粉尘:</translation> </message> <message> <source>After Fee:</source> <translation>扣除费用后:</translation> </message> <message> <source>Change:</source> <translation>变化:</translation> </message> <message> <source>(un)select all</source> <translation>(未)选择所有</translation> </message> <message> <source>Tree mode</source> <translation>树模式</translation> </message> <message> <source>List mode</source> <translation>列表模式</translation> </message> <message> <source>Amount</source> <translation>总计</translation> </message> <message> <source>Date</source> <translation>日期</translation> </message> <message> <source>Confirmations</source> <translation>确认数</translation> </message> <message> <source>Confirmed</source> <translation>确认</translation> </message> <message> <source>Copy address</source> <translation>复制地址</translation> </message> <message> <source>Copy label</source> <translation>复制标签</translation> </message> <message> <source>Copy amount</source> <translation>复制金额</translation> </message> <message> <source>Copy transaction ID</source> <translation>复制交易 ID</translation> </message> <message> <source>Lock unspent</source> <translation>锁定未消费的</translation> </message> <message> <source>Unlock unspent</source> <translation>解锁未消费</translation> </message> <message> <source>Copy quantity</source> <translation>复制数量</translation> </message> <message> <source>Copy fee</source> <translation>复制费用</translation> </message> <message> <source>Copy after fee</source> <translation>复制扣除费用</translation> </message> <message> <source>Copy bytes</source> <translation>复制字节</translation> </message> <message> <source>Copy change</source> <translation>复制改变</translation> </message> <message> <source>(%1 locked)</source> <translation>(%1 锁住)</translation> </message> <message> <source>yes</source> <translation>是</translation> </message> <message> <source>no</source> <translation>否</translation> </message> <message> <source>This label turns red if any recipient receives an amount smaller than the current dust threshold.</source> <translation>如果任何接收方接收到的金额小于当前粉尘交易的阈值,则此标签将变为红色。</translation> </message> <message> <source>Can vary +/- %1 satoshi(s) per input.</source> <translation>每个输入可以改变+/- %1 satoshi(s)。</translation> </message> <message> <source>(no label)</source> <translation>(没有标签)</translation> </message> </context> <context> <name>CreateWalletActivity</name> </context> <context> <name>CreateWalletDialog</name> </context> <context> <name>EditAddressDialog</name> <message> <source>Edit Address</source> <translation>编辑地址</translation> </message> <message> <source>&amp;Label</source> <translation>&amp;标签</translation> </message> <message> <source>The label associated with this address list entry</source> <translation>与此地址列表关联的标签</translation> </message> <message> <source>The address associated with this address list entry. 
This can only be modified for sending addresses.</source> <translation>与此地址列表项关联的地址。只能修改为发送地址。</translation> </message> <message> <source>&amp;Address</source> <translation>&amp;地址</translation> </message> <message> <source>New sending address</source> <translation>新的发送地址</translation> </message> <message> <source>Edit receiving address</source> <translation>编辑接收地址</translation> </message> <message> <source>Edit sending address</source> <translation>编辑发送地址</translation> </message> <message> <source>The entered address "%1" is not a valid Bitcoin address.</source> <translation>输入的地址"%1"不是有效的比特币地址。</translation> </message> <message> <source>Address "%1" already exists as a receiving address with label "%2" and so cannot be added as a sending address.</source> <translation>地址“%1”作为标签为“%2”的接收地址已存在,无法新增为发送地址。</translation> </message> <message> <source>The entered address "%1" is already in the address book with label "%2".</source> <translation>输入的地址“%1”在标签为“%2”的地址簿中已存在</translation> </message> <message> <source>Could not unlock wallet.</source> <translation>不能解锁钱包</translation> </message> <message> <source>New key generation failed.</source> <translation>新的密钥生成失败</translation> </message> </context> <context> <name>FreespaceChecker</name> <message> <source>A new data directory will be created.</source> <translation>新的数据目录将创建</translation> </message> <message> <source>name</source> <translation>名称</translation> </message> <message> <source>Directory already exists. Add %1 if you intend to create a new directory here.</source> <translation>目录已存在。如果你打算在此创建新目录,添加 %1。</translation> </message> <message> <source>Path already exists, and is not a directory.</source> <translation>路径已存在,并非目录。</translation> </message> <message> <source>Cannot create data directory here.</source> <translation>无法在此创建数据目录。</translation> </message> </context> <context> <name>HelpMessageDialog</name> <message> <source>version</source> <translation>版本</translation> </message> <message> <source>About %1</source> <translation>关于 %1</translation> </message> <message> <source>Command-line options</source> <translation>命令行选项</translation> </message> </context> <context> <name>Intro</name> <message> <source>Welcome</source> <translation>欢迎</translation> </message> <message> <source>Welcome to %1.</source> <translation>欢迎到 %1。</translation> </message> <message> <source>Use the default data directory</source> <translation>使用默认的数据目录</translation> </message> <message> <source>Use a custom data directory:</source> <translation>使用自定数据目录</translation> </message> <message> <source>Bitcoin</source> <translation>比特币</translation> </message> <message> <source>Error</source> <translation>错误</translation> </message> </context> <context> <name>ModalOverlay</name> <message> <source>Form</source> <translation>表格</translation> </message> <message> <source>Unknown...</source> <translation>未知...</translation> </message> <message> <source>Last block time</source> <translation>最后的区块时间</translation> </message> <message> <source>calculating...</source> <translation>计算中...</translation> </message> <message> <source>Estimated time left until synced</source> <translation>估计的同步剩余时间</translation> </message> <message> <source>Hide</source> <translation>隐藏</translation> </message> </context> <context> <name>OpenURIDialog</name> <message> <source>URI:</source> <translation>URI: </translation> </message> </context> <context> <name>OpenWalletActivity</name> <message> <source>default wallet</source> <translation>默认钱包</translation> </message> <message> <source>Opening 
Wallet &lt;b&gt;%1&lt;/b&gt;...</source> <translation>正在打开钱包&lt;b&gt;%1&lt;/b&gt;</translation> </message> </context> <context> <name>OptionsDialog</name> <message> <source>Options</source> <translation>选项</translation> </message> <message> <source>&amp;Main</source> <translation>&amp;主要</translation> </message> <message> <source>Automatically start %1 after logging in to the system.</source> <translation>登录系统后自动开始 %1。</translation> </message> <message> <source>Size of &amp;database cache</source> <translation>&amp;数据库缓存的大小</translation> </message> <message> <source>Number of script &amp;verification threads</source> <translation>脚本 &amp;验证线程的数量</translation> </message> <message> <source>Hide the icon from the system tray.</source> <translation>从系统托盘中隐藏图标</translation> </message> <message> <source>&amp;Hide tray icon</source> <translation>&amp;隐藏托盘图标</translation> </message> <message> <source>Open Configuration File</source> <translation>打开配置文件</translation> </message> <message> <source>Reset all client options to default.</source> <translation>重置所有客户端选项为默认</translation> </message> <message> <source>&amp;Reset Options</source> <translation>&amp;重置选项</translation> </message> <message> <source>&amp;Network</source> <translation>&amp;网络</translation> </message> <message> <source>W&amp;allet</source> <translation>钱&amp;包</translation> </message> <message> <source>Expert</source> <translation>专家</translation> </message> <message> <source>&amp;Connect through SOCKS5 proxy (default proxy):</source> <translation>&amp;通过 SOCKS5 代理连接(默认代理)</translation> </message> <message> <source>Proxy &amp;IP:</source> <translation>代理 &amp;IP:</translation> </message> <message> <source>&amp;Port:</source> <translation>&amp;端口</translation> </message> <message> <source>&amp;Window</source> <translation>&amp;窗口</translation> </message> <message> <source>Error</source> <translation>错误</translation> </message> </context> <context> <name>OverviewPage</name> <message> <source>Form</source> <translation>表格</translation> </message> </context> <context> <name>PaymentServer</name> </context> <context> <name>PeerTableModel</name> </context> <context> <name>QObject</name> <message> <source>Amount</source> <translation>总计</translation> </message> </context> <context> <name>QRImageWidget</name> </context> <context> <name>RPCConsole</name> <message> <source>Last block time</source> <translation>最后的区块时间</translation> </message> <message> <source>1 &amp;hour</source> <translation>1 &amp;小时</translation> </message> <message> <source>1 &amp;day</source> <translation>1 &amp;天</translation> </message> <message> <source>1 &amp;week</source> <translation>1 &amp;周</translation> </message> <message> <source>1 &amp;year</source> <translation>1 &amp;年</translation> </message> <message> <source>&amp;Disconnect</source> <translation>&amp;断开连接</translation> </message> </context> <context> <name>ReceiveCoinsDialog</name> <message> <source>&amp;Amount:</source> <translation>&amp;总计:</translation> </message> <message> <source>&amp;Label:</source> <translation>&amp;标签:</translation> </message> <message> <source>&amp;Message:</source> <translation>&amp;消息:</translation> </message> <message> <source>Copy label</source> <translation>复制标签</translation> </message> <message> <source>Copy amount</source> <translation>复制金额</translation> </message> </context> <context> <name>ReceiveRequestDialog</name> <message> <source>Address</source> <translation>地址</translation> </message> <message> <source>Amount</source> <translation>总计</translation> </message> 
<message> <source>Label</source> <translation>标签</translation> </message> </context> <context> <name>RecentRequestsTableModel</name> <message> <source>Date</source> <translation>日期</translation> </message> <message> <source>Label</source> <translation>标签</translation> </message> <message> <source>(no label)</source> <translation>(没有标签)</translation> </message> </context> <context> <name>SendCoinsDialog</name> <message> <source>Insufficient funds!</source> <translation>余额不足</translation> </message> <message> <source>Quantity:</source> <translation>数量:</translation> </message> <message> <source>Bytes:</source> <translation>字节:</translation> </message> <message> <source>Amount:</source> <translation>总计:</translation> </message> <message> <source>Fee:</source> <translation>手续费:</translation> </message> <message> <source>After Fee:</source> <translation>扣除费用后:</translation> </message> <message> <source>Change:</source> <translation>变化:</translation> </message> <message> <source>Choose...</source> <translation>选择...</translation> </message> <message> <source>Warning: Fee estimation is currently not possible.</source> <translation>警告:目前无法估算费用。</translation> </message> <message> <source>Specify a custom fee per kB (1,000 bytes) of the transaction's virtual size. Note: Since the fee is calculated on a per-byte basis, a fee of "100 satoshis per kB" for a transaction size of 500 bytes (half of 1 kB) would ultimately yield a fee of only 50 satoshis.</source> <translation>指定交易虚拟大小的每kB(1,000字节)的自定义费用。 注意:由于费用是按字节计算的,对于大小为500字节(1 kB的一半)的交易,“每kB 100 satoshis”的费用最终只会产生50 satoshis的费用。</translation> </message> <message> <source>Hide</source> <translation>隐藏</translation> </message> <message> <source>Send to multiple recipients at once</source> <translation>一次发送到多个接收</translation> </message> <message> <source>Dust:</source> <translation>粉尘:</translation> </message> <message> <source>When there is less transaction volume than space in the blocks, miners as well as relaying nodes may enforce a minimum fee. Paying only this minimum fee is just fine, but be aware that this can result in a never confirming transaction once there is more demand for bitcoin transactions than the network can process.</source> <translation>当交易量小于块的空间时,矿工和中继节点可以强制执行最低费用。只付最低费用就可以了,但注意,一旦比特币交易的需求超出网络的处理能力,就可能导致交易无法确认。</translation> </message> <message> <source>A too low fee might result in a never confirming transaction (read the tooltip)</source> <translation>太低的费用可能导致永远无法确认交易(阅读工具提示)</translation> </message> <message> <source>Confirmation time target:</source> <translation>目标确认时间:</translation> </message> <message> <source>Copy quantity</source> <translation>复制数量</translation> </message> <message> <source>Copy amount</source> <translation>复制金额</translation> </message> <message> <source>Copy fee</source> <translation>复制费用</translation> </message> <message> <source>Copy after fee</source> <translation>复制扣除费用</translation> </message> <message> <source>Copy bytes</source> <translation>复制字节</translation> </message> <message> <source>Copy change</source> <translation>复制改变</translation> </message> <message> <source>Are you sure you want to send?</source> <translation>确定发送么?</translation> </message> <message> <source>Please, review your transaction.</source> <translation>请检查您的交易。</translation> </message> <message> <source>Transaction fee</source> <translation>手续费</translation> </message> <message> <source>The recipient address is not valid. 
Please recheck.</source> <translation>收款人地址无效,请再次确认。</translation> </message> <message> <source>The amount to pay must be larger than 0.</source> <translation>支付的总额必须大于0。</translation> </message> <message> <source>The amount exceeds your balance.</source> <translation>总额超过你的余额。</translation> </message> <message> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation>当包含%1交易费用时,总额超过你的余额。</translation> </message> <message> <source>Duplicate address found: addresses should only be used once each.</source> <translation>发现重复地址:每个地址只能使用一次。</translation> </message> <message> <source>Transaction creation failed!</source> <translation>交易创建失败!</translation> </message> <message> <source>A fee higher than %1 is considered an absurdly high fee.</source> <translation>高于%1的手续费被认为非常高的手续费。</translation> </message> <message> <source>Payment request expired.</source> <translation>支付请求已过期。</translation> </message> <message> <source>Warning: Invalid Bitcoin address</source> <translation>警告:比特币地址无效</translation> </message> <message> <source>The address you selected for change is not part of this wallet. Any or all funds in your wallet may be sent to this address. Are you sure?</source> <translation>您选择更改的地址不在此钱包中。钱包里的所有资金都可以发送到这个地址。你确定吗?</translation> </message> <message> <source>(no label)</source> <translation>(没有标签)</translation> </message> </context> <context> <name>SendCoinsEntry</name> <message> <source>&amp;Label:</source> <translation>&amp;标签:</translation> </message> <message> <source>Choose previously used address</source> <translation>选择以前使用的地址</translation> </message> <message> <source>The Bitcoin address to send the payment to</source> <translation>支付到的比特币地址</translation> </message> <message> <source>The fee will be deducted from the amount being sent. The recipient will receive less bitcoins than you enter in the amount field. If multiple recipients are selected, the fee is split equally.</source> <translation>手续费将从发出的总额中扣除。接受者收到的比特币将少于你输入的金额字段。如果选择了多个接受者,手续费将平均分配。</translation> </message> <message> <source>This is an unauthenticated payment request.</source> <translation>这是一个未经身份验证的付款请求。</translation> </message> <message> <source>Enter a label for this address to add it to the list of used addresses</source> <translation>输入此地址的标签,将其添加到使用的地址列表中</translation> </message> <message> <source>A message that was attached to the bitcoin: URI which will be stored with the transaction for your reference. 
Note: This message will not be sent over the Bitcoin network.</source> <translation>附在比特币上的消息:URI将与交易一起存储,供参考。注意:此信息不会通过比特币网络发送。</translation> </message> </context> <context> <name>ShutdownWindow</name> </context> <context> <name>SignVerifyMessageDialog</name> <message> <source>Choose previously used address</source> <translation>选择以前使用的地址</translation> </message> </context> <context> <name>TrafficGraphWidget</name> </context> <context> <name>TransactionDesc</name> <message> <source>Date</source> <translation>日期</translation> </message> <message> <source>Transaction fee</source> <translation>手续费</translation> </message> <message> <source>Amount</source> <translation>总计</translation> </message> </context> <context> <name>TransactionDescDialog</name> </context> <context> <name>TransactionTableModel</name> <message> <source>Date</source> <translation>日期</translation> </message> <message> <source>Label</source> <translation>标签</translation> </message> <message> <source>(no label)</source> <translation>(没有标签)</translation> </message> </context> <context> <name>TransactionView</name> <message> <source>Copy address</source> <translation>复制地址</translation> </message> <message> <source>Copy label</source> <translation>复制标签</translation> </message> <message> <source>Copy amount</source> <translation>复制金额</translation> </message> <message> <source>Copy transaction ID</source> <translation>复制交易 ID</translation> </message> <message> <source>Comma separated file (*.csv)</source> <translation>逗号分隔的文件 (*.csv)</translation> </message> <message> <source>Confirmed</source> <translation>确认</translation> </message> <message> <source>Date</source> <translation>日期</translation> </message> <message> <source>Label</source> <translation>标签</translation> </message> <message> <source>Address</source> <translation>地址</translation> </message> <message> <source>Exporting Failed</source> <translation>导出失败</translation> </message> </context> <context> <name>UnitDisplayStatusBarControl</name> </context> <context> <name>WalletController</name> <message> <source>Close wallet</source> <translation>关闭钱包</translation> </message> </context> <context> <name>WalletFrame</name> </context> <context> <name>WalletModel</name> <message> <source>default wallet</source> <translation>默认钱包</translation> </message> </context> <context> <name>WalletView</name> <message> <source>&amp;Export</source> <translation>&amp;导出</translation> </message> <message> <source>Export the data in the current tab to a file</source> <translation>将当前选项卡中的数据导出到文件</translation> </message> </context> <context> <name>bitcoin-core</name> <message> <source>Transaction too large</source> <translation>超额转账</translation> </message> <message> <source>Insufficient funds</source> <translation>余额不足</translation> </message> <message> <source>Loading wallet...</source> <translation>正在载入钱包...</translation> </message> <message> <source>Rescanning...</source> <translation>再次扫描...</translation> </message> <message> <source>Done loading</source> <translation>载入完成</translation> </message> </context> </TS><|fim▁end|>
<message>
<|file_name|>xxhash64.js<|end_file_name|><|fim▁begin|>/** xxHash64 implementation in pure Javascript Copyright (C) 2016, Pierre Curto MIT license */ var UINT64 = require('cuint').UINT64 /* * Constants */ var PRIME64_1 = UINT64( '11400714785074694791' ) var PRIME64_2 = UINT64( '14029467366897019727' ) var PRIME64_3 = UINT64( '1609587929392839161' ) var PRIME64_4 = UINT64( '9650029242287828579' ) var PRIME64_5 = UINT64( '2870177450012600261' ) /** * Convert string to proper UTF-8 array * @param str Input string * @returns {Uint8Array} UTF8 array is returned as uint8 array */ function toUTF8Array (str) { var utf8 = [] for (var i=0, n=str.length; i < n; i++) { var charcode = str.charCodeAt(i) if (charcode < 0x80) utf8.push(charcode) else if (charcode < 0x800) { utf8.push(0xc0 | (charcode >> 6), 0x80 | (charcode & 0x3f)) } else if (charcode < 0xd800 || charcode >= 0xe000) { utf8.push(0xe0 | (charcode >> 12), 0x80 | ((charcode>>6) & 0x3f), 0x80 | (charcode & 0x3f)) } // surrogate pair else { i++; // UTF-16 encodes 0x10000-0x10FFFF by // subtracting 0x10000 and splitting the // 20 bits of 0x0-0xFFFFF into two halves charcode = 0x10000 + (((charcode & 0x3ff)<<10) | (str.charCodeAt(i) & 0x3ff)) utf8.push(0xf0 | (charcode >>18), 0x80 | ((charcode>>12) & 0x3f), 0x80 | ((charcode>>6) & 0x3f), 0x80 | (charcode & 0x3f)) } } return new Uint8Array(utf8) } /** * XXH64 object used as a constructor or a function * @constructor * or * @param {Object|String} input data * @param {Number|UINT64} seed * @return ThisExpression * or * @return {UINT64} xxHash */ function XXH64 () { if (arguments.length == 2) return new XXH64( arguments[1] ).update( arguments[0] ).digest() if (!(this instanceof XXH64)) return new XXH64( arguments[0] ) init.call(this, arguments[0]) } /** * Initialize the XXH64 instance with the given seed * @method init * @param {Number|Object} seed as a number or an unsigned 32 bits integer * @return ThisExpression */ function init (seed) { this.seed = seed instanceof UINT64 ? 
seed.clone() : UINT64(seed) this.v1 = this.seed.clone().add(PRIME64_1).add(PRIME64_2) this.v2 = this.seed.clone().add(PRIME64_2) this.v3 = this.seed.clone() this.v4 = this.seed.clone().subtract(PRIME64_1) this.total_len = 0 this.memsize = 0 this.memory = null return this } XXH64.prototype.init = init /** * Add data to be computed for the XXH64 hash * @method update * @param {String|Buffer|ArrayBuffer} input as a string or nodejs Buffer or ArrayBuffer * @return ThisExpression */ XXH64.prototype.update = function (input) { var isArrayBuffer // Convert all strings to utf-8 first (issue #5) if (typeof input == 'string') { input = toUTF8Array(input) isArrayBuffer = true } if (typeof ArrayBuffer !== "undefined" && input instanceof ArrayBuffer) { isArrayBuffer = true input = new Uint8Array(input); } var p = 0 var len = input.length var bEnd = p + len if (len == 0) return this this.total_len += len if (this.memsize == 0) { if (isArrayBuffer) { this.memory = new Uint8Array(32) } else { this.memory = new Buffer(32) } } if (this.memsize + len < 32) // fill in tmp buffer { // XXH64_memcpy(this.memory + this.memsize, input, len) if (isArrayBuffer) { this.memory.set( input.subarray(0, len), this.memsize ) } else { input.copy( this.memory, this.memsize, 0, len ) } this.memsize += len return this } if (this.memsize > 0) // some data left from previous update { // XXH64_memcpy(this.memory + this.memsize, input, 16-this.memsize); if (isArrayBuffer) { this.memory.set( input.subarray(0, 32 - this.memsize), this.memsize ) } else { input.copy( this.memory, this.memsize, 0, 32 - this.memsize ) } var p64 = 0 var other other = UINT64( (this.memory[p64+1] << 8) | this.memory[p64] , (this.memory[p64+3] << 8) | this.memory[p64+2] , (this.memory[p64+5] << 8) | this.memory[p64+4] , (this.memory[p64+7] << 8) | this.memory[p64+6] ) this.v1.add( other.multiply(PRIME64_2) ).rotl(31).multiply(PRIME64_1); p64 += 8 other = UINT64( (this.memory[p64+1] << 8) | this.memory[p64] , (this.memory[p64+3] << 8) | this.memory[p64+2] , (this.memory[p64+5] << 8) | this.memory[p64+4] , (this.memory[p64+7] << 8) | this.memory[p64+6] ) this.v2.add( other.multiply(PRIME64_2) ).rotl(31).multiply(PRIME64_1); p64 += 8 other = UINT64( (this.memory[p64+1] << 8) | this.memory[p64] , (this.memory[p64+3] << 8) | this.memory[p64+2] , (this.memory[p64+5] << 8) | this.memory[p64+4] , (this.memory[p64+7] << 8) | this.memory[p64+6] ) this.v3.add( other.multiply(PRIME64_2) ).rotl(31).multiply(PRIME64_1); p64 += 8 other = UINT64( (this.memory[p64+1] << 8) | this.memory[p64] , (this.memory[p64+3] << 8) | this.memory[p64+2] , (this.memory[p64+5] << 8) | this.memory[p64+4] , (this.memory[p64+7] << 8) | this.memory[p64+6] ) this.v4.add( other.multiply(PRIME64_2) ).rotl(31).multiply(PRIME64_1); p += 32 - this.memsize this.memsize = 0 } if (p <= bEnd - 32) { var limit = bEnd - 32 do { var other other = UINT64( (input[p+1] << 8) | input[p] , (input[p+3] << 8) | input[p+2] , (input[p+5] << 8) | input[p+4] , (input[p+7] << 8) | input[p+6] ) this.v1.add( other.multiply(PRIME64_2) ).rotl(31).multiply(PRIME64_1); p += 8 other = UINT64( (input[p+1] << 8) | input[p] , (input[p+3] << 8) | input[p+2] , (input[p+5] << 8) | input[p+4] , (input[p+7] << 8) | input[p+6] ) this.v2.add( other.multiply(PRIME64_2) ).rotl(31).multiply(PRIME64_1); p += 8 other = UINT64( (input[p+1] << 8) | input[p] , (input[p+3] << 8) | input[p+2] , (input[p+5] << 8) | input[p+4] , (input[p+7] << 8) | input[p+6] ) this.v3.add( other.multiply(PRIME64_2) ).rotl(31).multiply(PRIME64_1); p += 8 other = 
UINT64( (input[p+1] << 8) | input[p] , (input[p+3] << 8) | input[p+2] , (input[p+5] << 8) | input[p+4] , (input[p+7] << 8) | input[p+6] ) this.v4.add( other.multiply(PRIME64_2) ).rotl(31).multiply(PRIME64_1); p += 8 } while (p <= limit) } if (p < bEnd) { // XXH64_memcpy(this.memory, p, bEnd-p); if (isArrayBuffer) { this.memory.set( input.subarray(p, bEnd), this.memsize ) } else { input.copy( this.memory, this.memsize, p, bEnd ) } this.memsize = bEnd - p } return this } /** * Finalize the XXH64 computation. The XXH64 instance is ready for reuse for the given seed * @method digest * @return {UINT64} xxHash */ XXH64.prototype.digest = function () { var input = this.memory var p = 0 var bEnd = this.memsize var h64, h var u = new UINT64 if (this.total_len >= 32) { h64 = this.v1.clone().rotl(1) h64.add( this.v2.clone().rotl(7) ) h64.add( this.v3.clone().rotl(12) ) h64.add( this.v4.clone().rotl(18) ) h64.xor( this.v1.multiply(PRIME64_2).rotl(31).multiply(PRIME64_1) ) h64.multiply(PRIME64_1).add(PRIME64_4) h64.xor( this.v2.multiply(PRIME64_2).rotl(31).multiply(PRIME64_1) ) h64.multiply(PRIME64_1).add(PRIME64_4) h64.xor( this.v3.multiply(PRIME64_2).rotl(31).multiply(PRIME64_1) ) h64.multiply(PRIME64_1).add(PRIME64_4)<|fim▁hole|> h64.multiply(PRIME64_1).add(PRIME64_4) } else { h64 = this.seed.clone().add( PRIME64_5 ) } h64.add( u.fromNumber(this.total_len) ) while (p <= bEnd - 8) { u.fromBits( (input[p+1] << 8) | input[p] , (input[p+3] << 8) | input[p+2] , (input[p+5] << 8) | input[p+4] , (input[p+7] << 8) | input[p+6] ) u.multiply(PRIME64_2).rotl(31).multiply(PRIME64_1) h64 .xor(u) .rotl(27) .multiply( PRIME64_1 ) .add( PRIME64_4 ) p += 8 } if (p + 4 <= bEnd) { u.fromBits( (input[p+1] << 8) | input[p] , (input[p+3] << 8) | input[p+2] , 0 , 0 ) h64 .xor( u.multiply(PRIME64_1) ) .rotl(23) .multiply( PRIME64_2 ) .add( PRIME64_3 ) p += 4 } while (p < bEnd) { u.fromBits( input[p++], 0, 0, 0 ) h64 .xor( u.multiply(PRIME64_5) ) .rotl(11) .multiply(PRIME64_1) } h = h64.clone().shiftRight(33) h64.xor(h).multiply(PRIME64_2) h = h64.clone().shiftRight(29) h64.xor(h).multiply(PRIME64_3) h = h64.clone().shiftRight(32) h64.xor(h) // Reset the state this.init( this.seed ) return h64 } module.exports = XXH64<|fim▁end|>
h64.xor( this.v4.multiply(PRIME64_2).rotl(31).multiply(PRIME64_1) )
<|file_name|>chardistribution.rs<|end_file_name|><|fim▁begin|>use super::big5freq::*; use super::euckrfreq::*; use super::euctwfreq::*; use super::gb2312freq::*; use super::jisfreq::*; pub trait CharDistributionAnalysis { fn reset(&mut self); fn feed(&mut self, char: &[u8], char_len: usize); fn get_confidence(&self) -> f32; fn got_enough_data(&self) -> bool; fn get_order(&self, char: &[u8]) -> Option<usize>; } pub struct BaseCharDistributionAnalysis<'a> { c_enough_data_threshold: usize, c_minimum_data_threshold: usize, c_sure_yes: f32, c_sure_no: f32, m_char_to_freq_order: Option<&'a [u16]>, m_table_size: usize, m_typical_distribution_ratio: f32, m_done: bool, m_total_chars: usize, m_freq_chars: usize, } impl<'a> BaseCharDistributionAnalysis<'a> { pub fn new() -> BaseCharDistributionAnalysis<'a> { BaseCharDistributionAnalysis { c_enough_data_threshold: 1024, c_minimum_data_threshold: 3, c_sure_yes: 0.99, c_sure_no: 0.01, m_char_to_freq_order: None, m_table_size: 0, m_typical_distribution_ratio: 1.0, m_done: false, m_total_chars: 0, m_freq_chars: 0, } } } impl<'a> CharDistributionAnalysis for BaseCharDistributionAnalysis<'a> { fn reset(&mut self) { self.m_done = false; self.m_total_chars = 0; self.m_freq_chars = 0; } fn get_confidence(&self) -> f32 { if (self.m_total_chars <= 0) || (self.m_freq_chars <= self.c_minimum_data_threshold) { return self.c_sure_no; } if self.m_total_chars != self.m_freq_chars { let r = self.m_freq_chars as f32 / ((self.m_total_chars - self.m_freq_chars) as f32 * self.m_typical_distribution_ratio); if r < self.c_sure_yes { return r; } } return self.c_sure_yes; } fn got_enough_data(&self) -> bool { self.m_total_chars > self.c_enough_data_threshold } fn feed(&mut self, _: &[u8], _: usize) {}<|fim▁hole|> None } } pub struct EUCTWDistributionAnalysis<'a> { base: BaseCharDistributionAnalysis<'a>, } impl<'a> EUCTWDistributionAnalysis<'a> { pub fn new() -> EUCTWDistributionAnalysis<'a> { let mut x = EUCTWDistributionAnalysis { base: BaseCharDistributionAnalysis::new() }; x.base.m_char_to_freq_order = Some(EUCTW_CHAR_TO_FREQ_ORDER); x.base.m_table_size = EUCTW_TABLE_SIZE; x.base.m_typical_distribution_ratio = EUCTW_TYPICAL_DISTRIBUTION_RATIO; x } } impl<'a> CharDistributionAnalysis for EUCTWDistributionAnalysis<'a> { fn reset(&mut self) { self.base.reset(); } fn get_confidence(&self) -> f32 { self.base.get_confidence() } fn got_enough_data(&self) -> bool { self.base.got_enough_data() } fn feed(&mut self, char: &[u8], char_len: usize) { let order: Option<usize>; if char_len == 2 { order = self.get_order(char); } else { order = None; } if order.is_some() { self.base.m_total_chars += 1; if order.unwrap() < self.base.m_table_size { let tmp = self.base.m_char_to_freq_order.unwrap(); if 512 > tmp[order.unwrap() as usize] { self.base.m_freq_chars += 1; } } } } fn get_order(&self, char: &[u8]) -> Option<usize> { if char.len() >= 2 { if char[0] >= 0xC4 { let order: isize = 94 * (char[0] as isize - 0xC4) + char[1] as isize - 0xA1; if order >= 0 { Some(order as usize) } else { None } } else { None } } else { None } } } pub struct EUCKRDistributionAnalysis<'a> { base: BaseCharDistributionAnalysis<'a>, } impl<'a> EUCKRDistributionAnalysis<'a> { pub fn new() -> EUCKRDistributionAnalysis<'a> { let mut x = EUCKRDistributionAnalysis { base: BaseCharDistributionAnalysis::new() }; x.base.m_char_to_freq_order = Some(EUCKR_CHAR_TO_FREQ_ORDER); x.base.m_table_size = EUCKR_TABLE_SIZE; x.base.m_typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO; x } } impl<'a> CharDistributionAnalysis 
for EUCKRDistributionAnalysis<'a> { fn reset(&mut self) { self.base.reset(); } fn get_confidence(&self) -> f32 { self.base.get_confidence() } fn got_enough_data(&self) -> bool { self.base.got_enough_data() } fn feed(&mut self, char: &[u8], char_len: usize) { let order: Option<usize>; if char_len == 2 { order = self.get_order(char); } else { order = None; } if order.is_some() { self.base.m_total_chars += 1; if order.unwrap() < self.base.m_table_size { let tmp = self.base.m_char_to_freq_order.unwrap(); if 512 > tmp[order.unwrap() as usize] { self.base.m_freq_chars += 1; } } } } fn get_order(&self, char: &[u8]) -> Option<usize> { if char.len() >= 2 { if char[0] >= 0xB0 { let order: isize = 94 * (char[0] as isize - 0xB0) + char[1] as isize - 0xA1; if order >= 0 { Some(order as usize) } else { None } } else { None } } else { None } } } pub struct GB2312DistributionAnalysis<'a> { base: BaseCharDistributionAnalysis<'a>, } impl<'a> GB2312DistributionAnalysis<'a> { pub fn new() -> GB2312DistributionAnalysis<'a> { let mut x = GB2312DistributionAnalysis { base: BaseCharDistributionAnalysis::new() }; x.base.m_char_to_freq_order = Some(GB2312_CHAR_TO_FREQ_ORDER); x.base.m_table_size = GB2312_TABLE_SIZE; x.base.m_typical_distribution_ratio = GB2312_TYPICAL_DISTRIBUTION_RATIO; x } } impl<'a> CharDistributionAnalysis for GB2312DistributionAnalysis<'a> { fn reset(&mut self) { self.base.reset(); } fn get_confidence(&self) -> f32 { self.base.get_confidence() } fn got_enough_data(&self) -> bool { self.base.got_enough_data() } fn feed(&mut self, char: &[u8], char_len: usize) { let order: Option<usize>; if char_len == 2 { order = self.get_order(char); } else { order = None; } if order.is_some() { self.base.m_total_chars += 1; if order.unwrap() < self.base.m_table_size { let tmp = self.base.m_char_to_freq_order.unwrap(); if 512 > tmp[order.unwrap() as usize] { self.base.m_freq_chars += 1; } } } } fn get_order(&self, char: &[u8]) -> Option<usize> { if char.len() >= 2 { if (char[0] >= 0xB0) && (char[1] >= 0xA1) { let order: isize = 94 * (char[0] as isize - 0xB0) + char[1] as isize - 0xA1; if order >= 0 { Some(order as usize) } else { None } } else { None } } else { None } } } pub struct Big5DistributionAnalysis<'a> { base: BaseCharDistributionAnalysis<'a>, } impl<'a> Big5DistributionAnalysis<'a> { pub fn new() -> Big5DistributionAnalysis<'a> { let mut x = Big5DistributionAnalysis { base: BaseCharDistributionAnalysis::new() }; x.base.m_char_to_freq_order = Some(BIG5_CHAR_TO_FREQ_ORDER); x.base.m_table_size = BIG5_TABLE_SIZE; x.base.m_typical_distribution_ratio = BIG5_TYPICAL_DISTRIBUTION_RATIO; x } } impl<'a> CharDistributionAnalysis for Big5DistributionAnalysis<'a> { fn reset(&mut self) { self.base.reset(); } fn get_confidence(&self) -> f32 { self.base.get_confidence() } fn got_enough_data(&self) -> bool { self.base.got_enough_data() } fn feed(&mut self, char: &[u8], char_len: usize) { let order: Option<usize>; if char_len == 2 { order = self.get_order(char); } else { order = None; } if order.is_some() { self.base.m_total_chars += 1; if order.unwrap() < self.base.m_table_size { let tmp = self.base.m_char_to_freq_order.unwrap(); if 512 > tmp[order.unwrap() as usize] { self.base.m_freq_chars += 1; } } } } fn get_order(&self, char: &[u8]) -> Option<usize> { if char.len() >= 2 { if char[0] >= 0xA4 { if char[1] >= 0xA1 { let order: isize = 157 * (char[0] as isize - 0xA4) + char[1] as isize + 63 - 0xA1; if order >= 0 { Some(order as usize) } else { None } } else { let order: isize = 157 * (char[0] as isize - 0xA4) + 
char[1] as isize - 0x40; if order >= 0 { Some(order as usize) } else { None } } } else { None } } else { None } } } pub struct SJISDistributionAnalysis<'a> { base: BaseCharDistributionAnalysis<'a>, } impl<'a> SJISDistributionAnalysis<'a> { pub fn new() -> SJISDistributionAnalysis<'a> { let mut x = SJISDistributionAnalysis { base: BaseCharDistributionAnalysis::new() }; x.base.m_char_to_freq_order = Some(JIS_CHAR_TO_FREQ_ORDER); x.base.m_table_size = JIS_TABLE_SIZE; x.base.m_typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO; x } } impl<'a> CharDistributionAnalysis for SJISDistributionAnalysis<'a> { fn reset(&mut self) { self.base.reset(); } fn get_confidence(&self) -> f32 { self.base.get_confidence() } fn got_enough_data(&self) -> bool { self.base.got_enough_data() } fn feed(&mut self, char: &[u8], char_len: usize) { let order: Option<usize>; if char_len == 2 { order = self.get_order(char); } else { order = None; } if order.is_some() { self.base.m_total_chars += 1; if order.unwrap() < self.base.m_table_size { let tmp = self.base.m_char_to_freq_order.unwrap(); if 512 > tmp[order.unwrap() as usize] { self.base.m_freq_chars += 1; } } } } fn get_order(&self, char: &[u8]) -> Option<usize> { if char.len() >= 2 { let mut order: isize; if (char[0] >= 0x81) && (char[0] <= 0x9F) { order = 188 * (char[0] as isize - 0x81); } else if (char[0] >= 0xE0) && (char[0] <= 0xEF) { order = 188 * (char[0] as isize - 0xE0 + 31); } else { return None; } order = order + char[1] as isize - 0x40; if char[1] > 0x7F { return None; } else { if order >= 0 { return Some(order as usize); } else { return None; } } } else { None } } } pub struct EUCJPDistributionAnalysis<'a> { base: BaseCharDistributionAnalysis<'a>, } impl<'a> EUCJPDistributionAnalysis<'a> { pub fn new() -> EUCJPDistributionAnalysis<'a> { let mut x = EUCJPDistributionAnalysis { base: BaseCharDistributionAnalysis::new() }; x.base.m_char_to_freq_order = Some(JIS_CHAR_TO_FREQ_ORDER); x.base.m_table_size = JIS_TABLE_SIZE; x.base.m_typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO; x } } impl<'a> CharDistributionAnalysis for EUCJPDistributionAnalysis<'a> { fn reset(&mut self) { self.base.reset(); } fn get_confidence(&self) -> f32 { self.base.get_confidence() } fn got_enough_data(&self) -> bool { self.base.got_enough_data() } fn feed(&mut self, char: &[u8], char_len: usize) { let order: Option<usize>; if char_len == 2 { order = self.get_order(char); } else { order = None; } if order.is_some() { self.base.m_total_chars += 1; if order.unwrap() < self.base.m_table_size { let tmp = self.base.m_char_to_freq_order.unwrap(); if 512 > tmp[order.unwrap() as usize] { self.base.m_freq_chars += 1; } } } } fn get_order(&self, char: &[u8]) -> Option<usize> { if char.len() >= 2 { if char[0] >= 0xA0 { let order:isize = 94 * (char[0] as isize - 0xA1) + char[1] as isize - 0xA1; if order >= 0 { Some(order as usize) } else { None } } else { None } } else { None } } }<|fim▁end|>
fn get_order(&self, _: &[u8]) -> Option<usize> {
<|file_name|>players.js<|end_file_name|><|fim▁begin|>var playersController = exports; exports.constructor = function playersController(){}; var _ = require('lodash'); var players = require('../sonos/players'); playersController.list = function(req, res, next) { players.client.find(function(err, players) { if (err) { return next(err); } res.send(players); }); }; playersController.get = function(req, res, next) { players.client.find(req.params.name, function(err, players) { if (err) { return next(err); } if (!players || players.length < 1) { err = new Error('The requested player could not be found'); err.type = 'not found'; return next(err); } // only return the one item res.send(players[0]); }); }; playersController.queue = function(req, res, next) { players.client.queue(req.params.name, req.query, function(err, queue) { if (err) { return next(err); } res.send({ roomName: req.params.name, currentIndex: queue.currentIndex, limit: queue.limit, offset: queue.offset, tracks: queue.tracks }); }); }; playersController.clearQueue = function(req, res, next) { players.client.clearQueue(req.params.name, function(err, queue) { if (err) { return next(err); } res.send({ roomName: req.params.name, currentIndex: queue.currentIndex, limit: queue.limit, offset: queue.offset, tracks: queue.tracks }); }); }; playersController.playlists = function(req, res, next) { players.client.playlists(req.params.name, function(err, playlists) { if (err) { return next(err); } res.send({ roomName: req.params.name, playlists: playlists }); }); }; playersController.action = function(req, res, next) { var action = req.params.action; var playerName = req.params.name; var opts = _.extend({}, req.body, req.query); <|fim▁hole|> players.client.action(playerName, action, opts, function(err, state) { if (err) { return next(err); } res.send(state); }); };<|fim▁end|>
<|file_name|>issue-21356.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. macro_rules! test { ($wrong:t_ty ..) => () } //~^ ERROR: invalid fragment specifier `t_ty`<|fim▁hole|><|fim▁end|>
fn main() {}
<|file_name|>string.pb.go<|end_file_name|><|fim▁begin|>// Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.25.0 // protoc v3.19.1 // source: envoy/type/matcher/v3/string.proto package envoy_type_matcher_v3 import ( _ "github.com/cncf/xds/go/udpa/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" ) const ( // Verify that this generated code is sufficiently up-to-date. _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) // Verify that runtime/protoimpl is sufficiently up-to-date. _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) // This is a compile-time assertion that a sufficiently up-to-date version // of the legacy proto package is being used. const _ = proto.ProtoPackageIsVersion4 // Specifies the way to match a string. // [#next-free-field: 8] type StringMatcher struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Types that are assignable to MatchPattern: // *StringMatcher_Exact // *StringMatcher_Prefix // *StringMatcher_Suffix // *StringMatcher_SafeRegex // *StringMatcher_Contains MatchPattern isStringMatcher_MatchPattern `protobuf_oneof:"match_pattern"` // If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. This // has no effect for the safe_regex match. // For example, the matcher *data* will match both input string *Data* and *data* if set to true. IgnoreCase bool `protobuf:"varint,6,opt,name=ignore_case,json=ignoreCase,proto3" json:"ignore_case,omitempty"` } func (x *StringMatcher) Reset() { *x = StringMatcher{} if protoimpl.UnsafeEnabled { mi := &file_envoy_type_matcher_v3_string_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *StringMatcher) String() string { return protoimpl.X.MessageStringOf(x) } func (*StringMatcher) ProtoMessage() {} func (x *StringMatcher) ProtoReflect() protoreflect.Message { mi := &file_envoy_type_matcher_v3_string_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use StringMatcher.ProtoReflect.Descriptor instead. 
func (*StringMatcher) Descriptor() ([]byte, []int) { return file_envoy_type_matcher_v3_string_proto_rawDescGZIP(), []int{0} }<|fim▁hole|> if m != nil { return m.MatchPattern } return nil } func (x *StringMatcher) GetExact() string { if x, ok := x.GetMatchPattern().(*StringMatcher_Exact); ok { return x.Exact } return "" } func (x *StringMatcher) GetPrefix() string { if x, ok := x.GetMatchPattern().(*StringMatcher_Prefix); ok { return x.Prefix } return "" } func (x *StringMatcher) GetSuffix() string { if x, ok := x.GetMatchPattern().(*StringMatcher_Suffix); ok { return x.Suffix } return "" } func (x *StringMatcher) GetSafeRegex() *RegexMatcher { if x, ok := x.GetMatchPattern().(*StringMatcher_SafeRegex); ok { return x.SafeRegex } return nil } func (x *StringMatcher) GetContains() string { if x, ok := x.GetMatchPattern().(*StringMatcher_Contains); ok { return x.Contains } return "" } func (x *StringMatcher) GetIgnoreCase() bool { if x != nil { return x.IgnoreCase } return false } type isStringMatcher_MatchPattern interface { isStringMatcher_MatchPattern() } type StringMatcher_Exact struct { // The input string must match exactly the string specified here. // // Examples: // // * *abc* only matches the value *abc*. Exact string `protobuf:"bytes,1,opt,name=exact,proto3,oneof"` } type StringMatcher_Prefix struct { // The input string must have the prefix specified here. // Note: empty prefix is not allowed, please use regex instead. // // Examples: // // * *abc* matches the value *abc.xyz* Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3,oneof"` } type StringMatcher_Suffix struct { // The input string must have the suffix specified here. // Note: empty prefix is not allowed, please use regex instead. // // Examples: // // * *abc* matches the value *xyz.abc* Suffix string `protobuf:"bytes,3,opt,name=suffix,proto3,oneof"` } type StringMatcher_SafeRegex struct { // The input string must match the regular expression specified here. SafeRegex *RegexMatcher `protobuf:"bytes,5,opt,name=safe_regex,json=safeRegex,proto3,oneof"` } type StringMatcher_Contains struct { // The input string must have the substring specified here. // Note: empty contains match is not allowed, please use regex instead. // // Examples: // // * *abc* matches the value *xyz.abc.def* Contains string `protobuf:"bytes,7,opt,name=contains,proto3,oneof"` } func (*StringMatcher_Exact) isStringMatcher_MatchPattern() {} func (*StringMatcher_Prefix) isStringMatcher_MatchPattern() {} func (*StringMatcher_Suffix) isStringMatcher_MatchPattern() {} func (*StringMatcher_SafeRegex) isStringMatcher_MatchPattern() {} func (*StringMatcher_Contains) isStringMatcher_MatchPattern() {} // Specifies a list of ways to match a string. 
type ListStringMatcher struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Patterns []*StringMatcher `protobuf:"bytes,1,rep,name=patterns,proto3" json:"patterns,omitempty"` } func (x *ListStringMatcher) Reset() { *x = ListStringMatcher{} if protoimpl.UnsafeEnabled { mi := &file_envoy_type_matcher_v3_string_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ListStringMatcher) String() string { return protoimpl.X.MessageStringOf(x) } func (*ListStringMatcher) ProtoMessage() {} func (x *ListStringMatcher) ProtoReflect() protoreflect.Message { mi := &file_envoy_type_matcher_v3_string_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ListStringMatcher.ProtoReflect.Descriptor instead. func (*ListStringMatcher) Descriptor() ([]byte, []int) { return file_envoy_type_matcher_v3_string_proto_rawDescGZIP(), []int{1} } func (x *ListStringMatcher) GetPatterns() []*StringMatcher { if x != nil { return x.Patterns } return nil } var File_envoy_type_matcher_v3_string_proto protoreflect.FileDescriptor var file_envoy_type_matcher_v3_string_proto_rawDesc = []byte{ 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd1, 0x02, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, 0x12, 0x21, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x4e, 0x0a, 0x0a, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 
0x09, 0x73, 0x61, 0x66, 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x25, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x61, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x61, 0x73, 0x65, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x22, 0x8c, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4a, 0x0a, 0x08, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x3c, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( file_envoy_type_matcher_v3_string_proto_rawDescOnce sync.Once file_envoy_type_matcher_v3_string_proto_rawDescData = file_envoy_type_matcher_v3_string_proto_rawDesc ) func file_envoy_type_matcher_v3_string_proto_rawDescGZIP() []byte { file_envoy_type_matcher_v3_string_proto_rawDescOnce.Do(func() { file_envoy_type_matcher_v3_string_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_string_proto_rawDescData) }) return file_envoy_type_matcher_v3_string_proto_rawDescData } var file_envoy_type_matcher_v3_string_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_envoy_type_matcher_v3_string_proto_goTypes = []interface{}{ (*StringMatcher)(nil), // 0: envoy.type.matcher.v3.StringMatcher (*ListStringMatcher)(nil), // 1: envoy.type.matcher.v3.ListStringMatcher (*RegexMatcher)(nil), // 2: envoy.type.matcher.v3.RegexMatcher } var file_envoy_type_matcher_v3_string_proto_depIdxs = []int32{ 2, // 0: envoy.type.matcher.v3.StringMatcher.safe_regex:type_name -> envoy.type.matcher.v3.RegexMatcher 0, // 1: envoy.type.matcher.v3.ListStringMatcher.patterns:type_name -> envoy.type.matcher.v3.StringMatcher 2, // [2:2] is the sub-list for method output_type 2, // [2:2] is the sub-list for method input_type 2, // [2:2] is the sub-list for extension type_name 2, // [2:2] is the sub-list for 
extension extendee 0, // [0:2] is the sub-list for field type_name } func init() { file_envoy_type_matcher_v3_string_proto_init() } func file_envoy_type_matcher_v3_string_proto_init() { if File_envoy_type_matcher_v3_string_proto != nil { return } file_envoy_type_matcher_v3_regex_proto_init() if !protoimpl.UnsafeEnabled { file_envoy_type_matcher_v3_string_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StringMatcher); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_envoy_type_matcher_v3_string_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListStringMatcher); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } } file_envoy_type_matcher_v3_string_proto_msgTypes[0].OneofWrappers = []interface{}{ (*StringMatcher_Exact)(nil), (*StringMatcher_Prefix)(nil), (*StringMatcher_Suffix)(nil), (*StringMatcher_SafeRegex)(nil), (*StringMatcher_Contains)(nil), } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_type_matcher_v3_string_proto_rawDesc, NumEnums: 0, NumMessages: 2, NumExtensions: 0, NumServices: 0, }, GoTypes: file_envoy_type_matcher_v3_string_proto_goTypes, DependencyIndexes: file_envoy_type_matcher_v3_string_proto_depIdxs, MessageInfos: file_envoy_type_matcher_v3_string_proto_msgTypes, }.Build() File_envoy_type_matcher_v3_string_proto = out.File file_envoy_type_matcher_v3_string_proto_rawDesc = nil file_envoy_type_matcher_v3_string_proto_goTypes = nil file_envoy_type_matcher_v3_string_proto_depIdxs = nil }<|fim▁end|>
func (m *StringMatcher) GetMatchPattern() isStringMatcher_MatchPattern {
<|file_name|>create-stats-perfs-db.py<|end_file_name|><|fim▁begin|>from collections import defaultdict
from zipfile import ZipFile
from datetime import datetime
from itertools import izip
import logging
import sys
import shelve

from backtest import constants


def main():
    PRICES_DATA = constants.PRICES_DATA
    performances = shelve.open(constants.CACHE_PERFS, protocol=2)
    with ZipFile(PRICES_DATA, 'r') as prices_data:
        securities = prices_data.namelist()
        for index, dataset_name in enumerate(securities):
            #if index == 100: break
            batch_count = index / 100 + 1
            if index % 100 == 0:
                logging.info('processing batch %d/%d' % (batch_count, len(securities) / 100 + 1))
            security_code = dataset_name.split('/')[-1][:-4]
            security_performances = dict()
            dataset = prices_data.open(dataset_name).readlines()
            dates = list()
            prices = list()
            for row in dataset:
                items = row.strip().split(',')
                px_date = datetime.strptime(items[0], '%Y-%m-%d')
                if items[4].startswith('#N/A'):
                    continue
                px_last = float(items[4])
                dates.append(px_date)
                prices.append(px_last)
            for date, price, price_prev in izip(dates[1:], prices[1:], prices[:-1]):
                perf = (price / price_prev) - 1.0
                security_performances[date.strftime('%Y%m%d')] = perf
            performances[security_code] = security_performances<|fim▁hole|>
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(levelname)s %(asctime)s %(module)s - %(message)s'
    )
    main()<|fim▁end|>
    performances.close()

if __name__ == '__main__':
<|file_name|>amodule.py<|end_file_name|><|fim▁begin|>from txaws.server.method import Method
from txaws.server.tests.fixtures import method<|fim▁hole|>
@method
class TestMethod(Method):
    pass<|fim▁end|>
<|file_name|>debug.cpp<|end_file_name|><|fim▁begin|>/* * Copyright (C) 2002-2011 The DOSBox Team * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include "dosbox.h" #if C_DEBUG #include <string.h> #include <list> #include <ctype.h> #include <fstream> #include <iomanip> #include <string> #include <sstream> using namespace std; #include "debug.h" #include "cross.h" //snprintf #include "cpu.h" #include "video.h" #include "pic.h" #include "mapper.h" #include "cpu.h" #include "callback.h" #include "inout.h" #include "mixer.h" #include "timer.h" #include "paging.h" #include "support.h" #include "shell.h" #include "programs.h" #include "debug_inc.h" #include "../cpu/lazyflags.h" #include "keyboard.h" #include "setup.h" #ifdef WIN32 void WIN32_Console(); #else #include <termios.h> #include <unistd.h> static struct termios consolesettings; #endif int old_cursor_state; // Forwards static void DrawCode(void); static void DEBUG_RaiseTimerIrq(void); static void SaveMemory(Bitu seg, Bitu ofs1, Bit32u num); static void SaveMemoryBin(Bitu seg, Bitu ofs1, Bit32u num); static void LogMCBS(void); static void LogGDT(void); static void LogLDT(void); static void LogIDT(void); static void LogPages(char* selname); static void LogCPUInfo(void); static void OutputVecTable(char* filename); static void DrawVariables(void); char* AnalyzeInstruction(char* inst, bool saveSelector); Bit32u GetHexValue(char* str, char*& hex); #if 0 class DebugPageHandler : public PageHandler { public: Bitu readb(PhysPt /*addr*/) { } Bitu readw(PhysPt /*addr*/) { } Bitu readd(PhysPt /*addr*/) { } void writeb(PhysPt /*addr*/,Bitu /*val*/) { } void writew(PhysPt /*addr*/,Bitu /*val*/) { } void writed(PhysPt /*addr*/,Bitu /*val*/) { } }; #endif class DEBUG; DEBUG* pDebugcom = 0; bool exitLoop = false; // Heavy Debugging Vars for logging #if C_HEAVY_DEBUG static ofstream cpuLogFile; static bool cpuLog = false; static int cpuLogCounter = 0; static int cpuLogType = 1; // log detail static bool zeroProtect = false; bool logHeavy = false; #endif static struct { Bit32u eax,ebx,ecx,edx,esi,edi,ebp,esp,eip; } oldregs; static char curSelectorName[3] = { 0,0,0 }; static Segment oldsegs[6]; static Bitu oldflags,oldcpucpl; DBGBlock dbg; Bitu cycle_count; static bool debugging; static void SetColor(Bitu test) { if (test) { if (has_colors()) { wattrset(dbg.win_reg,COLOR_PAIR(PAIR_BYELLOW_BLACK));} } else { if (has_colors()) { wattrset(dbg.win_reg,0);} } } #define MAXCMDLEN 254 struct SCodeViewData { int cursorPos; Bit16u firstInstSize; Bit16u useCS; Bit32u useEIPlast, useEIPmid; Bit32u useEIP; Bit16u cursorSeg; Bit32u cursorOfs; bool ovrMode; char inputStr[MAXCMDLEN+1]; char suspInputStr[MAXCMDLEN+1]; int inputPos; } codeViewData; static Bit16u dataSeg;<|fim▁hole|> static void ClearInputLine(void) { codeViewData.inputStr[0] = 0; codeViewData.inputPos = 0; } // History stuff #define MAX_HIST_BUFFER 50 static 
list<string> histBuff; static list<string>::iterator histBuffPos = histBuff.end(); /***********/ /* Helpers */ /***********/ Bit32u PhysMakeProt(Bit16u selector, Bit32u offset) { Descriptor desc; if (cpu.gdt.GetDescriptor(selector,desc)) return desc.GetBase()+offset; return 0; }; Bit32u GetAddress(Bit16u seg, Bit32u offset) { if (seg==SegValue(cs)) return SegPhys(cs)+offset; if (cpu.pmode && !(reg_flags & FLAG_VM)) { Descriptor desc; if (cpu.gdt.GetDescriptor(seg,desc)) return PhysMakeProt(seg,offset); } return (seg<<4)+offset; } static char empty_sel[] = { ' ',' ',0 }; bool GetDescriptorInfo(char* selname, char* out1, char* out2) { Bitu sel; Descriptor desc; if (strstr(selname,"cs") || strstr(selname,"CS")) sel = SegValue(cs); else if (strstr(selname,"ds") || strstr(selname,"DS")) sel = SegValue(ds); else if (strstr(selname,"es") || strstr(selname,"ES")) sel = SegValue(es); else if (strstr(selname,"fs") || strstr(selname,"FS")) sel = SegValue(fs); else if (strstr(selname,"gs") || strstr(selname,"GS")) sel = SegValue(gs); else if (strstr(selname,"ss") || strstr(selname,"SS")) sel = SegValue(ss); else { sel = GetHexValue(selname,selname); if (*selname==0) selname=empty_sel; } if (cpu.gdt.GetDescriptor(sel,desc)) { switch (desc.Type()) { case DESC_TASK_GATE: sprintf(out1,"%s: s:%08X type:%02X p",selname,desc.GetSelector(),desc.desc.gate.type); sprintf(out2," TaskGate dpl : %01X %1X",desc.desc.gate.dpl,desc.desc.gate.p); return true; case DESC_LDT: case DESC_286_TSS_A: case DESC_286_TSS_B: case DESC_386_TSS_A: case DESC_386_TSS_B: sprintf(out1,"%s: b:%08X type:%02X pag",selname,desc.GetBase(),desc.desc.seg.type); sprintf(out2," l:%08X dpl : %01X %1X%1X%1X",desc.GetLimit(),desc.desc.seg.dpl,desc.desc.seg.p,desc.desc.seg.avl,desc.desc.seg.g); return true; case DESC_286_CALL_GATE: case DESC_386_CALL_GATE: sprintf(out1,"%s: s:%08X type:%02X p params: %02X",selname,desc.GetSelector(),desc.desc.gate.type,desc.desc.gate.paramcount); sprintf(out2," o:%08X dpl : %01X %1X",desc.GetOffset(),desc.desc.gate.dpl,desc.desc.gate.p); return true; case DESC_286_INT_GATE: case DESC_286_TRAP_GATE: case DESC_386_INT_GATE: case DESC_386_TRAP_GATE: sprintf(out1,"%s: s:%08X type:%02X p",selname,desc.GetSelector(),desc.desc.gate.type); sprintf(out2," o:%08X dpl : %01X %1X",desc.GetOffset(),desc.desc.gate.dpl,desc.desc.gate.p); return true; } sprintf(out1,"%s: b:%08X type:%02X parbg",selname,desc.GetBase(),desc.desc.seg.type); sprintf(out2," l:%08X dpl : %01X %1X%1X%1X%1X%1X",desc.GetLimit(),desc.desc.seg.dpl,desc.desc.seg.p,desc.desc.seg.avl,desc.desc.seg.r,desc.desc.seg.big,desc.desc.seg.g); return true; } else { strcpy(out1," "); strcpy(out2," "); } return false; }; /********************/ /* DebugVar stuff */ /********************/ class CDebugVar { public: CDebugVar(char* _name, PhysPt _adr) { adr=_adr; safe_strncpy(name,_name,16); }; char* GetName(void) { return name; }; PhysPt GetAdr (void) { return adr; }; private: PhysPt adr; char name[16]; public: static void InsertVariable (char* name, PhysPt adr); static CDebugVar* FindVar (PhysPt adr); static void DeleteAll (); static bool SaveVars (char* name); static bool LoadVars (char* name); static std::list<CDebugVar*> varList; }; std::list<CDebugVar*> CDebugVar::varList; /********************/ /* Breakpoint stuff */ /********************/ bool skipFirstInstruction = false; enum EBreakpoint { BKPNT_UNKNOWN, BKPNT_PHYSICAL, BKPNT_INTERRUPT, BKPNT_MEMORY, BKPNT_MEMORY_PROT, BKPNT_MEMORY_LINEAR }; #define BPINT_ALL 0x100 class CBreakpoint { public: CBreakpoint(void); 
void SetAddress (Bit16u seg, Bit32u off) { location = GetAddress(seg,off); type = BKPNT_PHYSICAL; segment = seg; offset = off; }; void SetAddress (PhysPt adr) { location = adr; type = BKPNT_PHYSICAL; }; void SetInt (Bit8u _intNr, Bit16u ah) { intNr = _intNr, ahValue = ah; type = BKPNT_INTERRUPT; }; void SetOnce (bool _once) { once = _once; }; void SetType (EBreakpoint _type) { type = _type; }; void SetValue (Bit8u value) { ahValue = value; }; bool IsActive (void) { return active; }; void Activate (bool _active); EBreakpoint GetType (void) { return type; }; bool GetOnce (void) { return once; }; PhysPt GetLocation (void) { if (GetType()!=BKPNT_INTERRUPT) return location; else return 0; }; Bit16u GetSegment (void) { return segment; }; Bit32u GetOffset (void) { return offset; }; Bit8u GetIntNr (void) { if (GetType()==BKPNT_INTERRUPT) return intNr; else return 0; }; Bit16u GetValue (void) { if (GetType()!=BKPNT_PHYSICAL) return ahValue; else return 0; }; // statics static CBreakpoint* AddBreakpoint (Bit16u seg, Bit32u off, bool once); static CBreakpoint* AddIntBreakpoint (Bit8u intNum, Bit16u ah, bool once); static CBreakpoint* AddMemBreakpoint (Bit16u seg, Bit32u off); static void ActivateBreakpoints (PhysPt adr, bool activate); static bool CheckBreakpoint (PhysPt adr); static bool CheckBreakpoint (Bitu seg, Bitu off); static bool CheckIntBreakpoint (PhysPt adr, Bit8u intNr, Bit16u ahValue); static bool IsBreakpoint (PhysPt where); static bool IsBreakpointDrawn (PhysPt where); static bool DeleteBreakpoint (PhysPt where); static bool DeleteByIndex (Bit16u index); static void DeleteAll (void); static void ShowList (void); private: EBreakpoint type; // Physical PhysPt location; Bit8u oldData; Bit16u segment; Bit32u offset; // Int Bit8u intNr; Bit16u ahValue; // Shared bool active; bool once; static std::list<CBreakpoint*> BPoints; public: static CBreakpoint* ignoreOnce; }; CBreakpoint::CBreakpoint(void): location(0), active(false),once(false), segment(0),offset(0),intNr(0),ahValue(0), type(BKPNT_UNKNOWN) { }; void CBreakpoint::Activate(bool _active) { #if !C_HEAVY_DEBUG if (GetType()==BKPNT_PHYSICAL) { if (_active) { // Set 0xCC and save old value Bit8u data = mem_readb(location); if (data!=0xCC) { oldData = data; mem_writeb(location,0xCC); }; } else { // Remove 0xCC and set old value if (mem_readb (location)==0xCC) { mem_writeb(location,oldData); }; } } #endif active = _active; }; // Statics std::list<CBreakpoint*> CBreakpoint::BPoints; CBreakpoint* CBreakpoint::ignoreOnce = 0; Bitu ignoreAddressOnce = 0; CBreakpoint* CBreakpoint::AddBreakpoint(Bit16u seg, Bit32u off, bool once) { CBreakpoint* bp = new CBreakpoint(); bp->SetAddress (seg,off); bp->SetOnce (once); BPoints.push_front (bp); return bp; }; CBreakpoint* CBreakpoint::AddIntBreakpoint(Bit8u intNum, Bit16u ah, bool once) { CBreakpoint* bp = new CBreakpoint(); bp->SetInt (intNum,ah); bp->SetOnce (once); BPoints.push_front (bp); return bp; }; CBreakpoint* CBreakpoint::AddMemBreakpoint(Bit16u seg, Bit32u off) { CBreakpoint* bp = new CBreakpoint(); bp->SetAddress (seg,off); bp->SetOnce (false); bp->SetType (BKPNT_MEMORY); BPoints.push_front (bp); return bp; }; void CBreakpoint::ActivateBreakpoints(PhysPt adr, bool activate) { // activate all breakpoints std::list<CBreakpoint*>::iterator i; CBreakpoint* bp; for(i=BPoints.begin(); i != BPoints.end(); i++) { bp = (*i); // Do not activate, when bp is an actual address if (activate && (bp->GetType()==BKPNT_PHYSICAL) && (bp->GetLocation()==adr)) { // Do not activate :) continue; } 
bp->Activate(activate); }; }; bool CBreakpoint::CheckBreakpoint(Bitu seg, Bitu off) // Checks if breakpoint is valid and should stop execution { if ((ignoreAddressOnce!=0) && (GetAddress(seg,off)==ignoreAddressOnce)) { ignoreAddressOnce = 0; return false; } else ignoreAddressOnce = 0; // Search matching breakpoint std::list<CBreakpoint*>::iterator i; CBreakpoint* bp; for(i=BPoints.begin(); i != BPoints.end(); i++) { bp = (*i); if ((bp->GetType()==BKPNT_PHYSICAL) && bp->IsActive() && (bp->GetSegment()==seg) && (bp->GetOffset()==off)) { // Ignore Once ? if (ignoreOnce==bp) { ignoreOnce=0; bp->Activate(true); return false; }; // Found, if (bp->GetOnce()) { // delete it, if it should only be used once (BPoints.erase)(i); bp->Activate(false); delete bp; } else { ignoreOnce = bp; }; return true; } #if C_HEAVY_DEBUG // Memory breakpoint support else if (bp->IsActive()) { if ((bp->GetType()==BKPNT_MEMORY) || (bp->GetType()==BKPNT_MEMORY_PROT) || (bp->GetType()==BKPNT_MEMORY_LINEAR)) { // Watch Protected Mode Memoryonly in pmode if (bp->GetType()==BKPNT_MEMORY_PROT) { // Check if pmode is active if (!cpu.pmode) return false; // Check if descriptor is valid Descriptor desc; if (!cpu.gdt.GetDescriptor(bp->GetSegment(),desc)) return false; if (desc.GetLimit()==0) return false; } Bitu address; if (bp->GetType()==BKPNT_MEMORY_LINEAR) address = bp->GetOffset(); else address = GetAddress(bp->GetSegment(),bp->GetOffset()); Bit8u value=0; if (mem_readb_checked(address,&value)) return false; if (bp->GetValue() != value) { // Yup, memory value changed DEBUG_ShowMsg("DEBUG: Memory breakpoint %s: %04X:%04X - %02X -> %02X\n",(bp->GetType()==BKPNT_MEMORY_PROT)?"(Prot)":"",bp->GetSegment(),bp->GetOffset(),bp->GetValue(),value); bp->SetValue(value); return true; }; } }; #endif }; return false; }; bool CBreakpoint::CheckIntBreakpoint(PhysPt adr, Bit8u intNr, Bit16u ahValue) // Checks if interrupt breakpoint is valid and should stop execution { if ((ignoreAddressOnce!=0) && (adr==ignoreAddressOnce)) { ignoreAddressOnce = 0; return false; } else ignoreAddressOnce = 0; // Search matching breakpoint std::list<CBreakpoint*>::iterator i; CBreakpoint* bp; for(i=BPoints.begin(); i != BPoints.end(); i++) { bp = (*i); if ((bp->GetType()==BKPNT_INTERRUPT) && bp->IsActive() && (bp->GetIntNr()==intNr)) { if ((bp->GetValue()==BPINT_ALL) || (bp->GetValue()==ahValue)) { // Ignore it once ? 
if (ignoreOnce==bp) { ignoreOnce=0; bp->Activate(true); return false; }; // Found if (bp->GetOnce()) { // delete it, if it should only be used once (BPoints.erase)(i); bp->Activate(false); delete bp; } else { ignoreOnce = bp; } return true; } }; }; return false; }; void CBreakpoint::DeleteAll() { std::list<CBreakpoint*>::iterator i; CBreakpoint* bp; for(i=BPoints.begin(); i != BPoints.end(); i++) { bp = (*i); bp->Activate(false); delete bp; }; (BPoints.clear)(); }; bool CBreakpoint::DeleteByIndex(Bit16u index) { // Search matching breakpoint int nr = 0; std::list<CBreakpoint*>::iterator i; CBreakpoint* bp; for(i=BPoints.begin(); i != BPoints.end(); i++) { if (nr==index) { bp = (*i); (BPoints.erase)(i); bp->Activate(false); delete bp; return true; } nr++; }; return false; }; bool CBreakpoint::DeleteBreakpoint(PhysPt where) { // Search matching breakpoint std::list<CBreakpoint*>::iterator i; CBreakpoint* bp; for(i=BPoints.begin(); i != BPoints.end(); i++) { bp = (*i); if ((bp->GetType()==BKPNT_PHYSICAL) && (bp->GetLocation()==where)) { (BPoints.erase)(i); bp->Activate(false); delete bp; return true; } }; return false; }; bool CBreakpoint::IsBreakpoint(PhysPt adr) // is there a breakpoint at address ? { // Search matching breakpoint std::list<CBreakpoint*>::iterator i; CBreakpoint* bp; for(i=BPoints.begin(); i != BPoints.end(); i++) { bp = (*i); if ((bp->GetType()==BKPNT_PHYSICAL) && (bp->GetSegment()==adr)) { return true; }; if ((bp->GetType()==BKPNT_PHYSICAL) && (bp->GetLocation()==adr)) { return true; }; }; return false; }; bool CBreakpoint::IsBreakpointDrawn(PhysPt adr) // valid breakpoint, that should be drawn ? { // Search matching breakpoint std::list<CBreakpoint*>::iterator i; CBreakpoint* bp; for(i=BPoints.begin(); i != BPoints.end(); i++) { bp = (*i); if ((bp->GetType()==BKPNT_PHYSICAL) && (bp->GetLocation()==adr)) { // Only draw, if breakpoint is not only once, return !bp->GetOnce(); }; }; return false; }; void CBreakpoint::ShowList(void) { // iterate list int nr = 0; std::list<CBreakpoint*>::iterator i; for(i=BPoints.begin(); i != BPoints.end(); i++) { CBreakpoint* bp = (*i); if (bp->GetType()==BKPNT_PHYSICAL) { DEBUG_ShowMsg("%02X. BP %04X:%04X\n",nr,bp->GetSegment(),bp->GetOffset()); } else if (bp->GetType()==BKPNT_INTERRUPT) { if (bp->GetValue()==BPINT_ALL) DEBUG_ShowMsg("%02X. BPINT %02X\n",nr,bp->GetIntNr()); else DEBUG_ShowMsg("%02X. BPINT %02X AH=%02X\n",nr,bp->GetIntNr(),bp->GetValue()); } else if (bp->GetType()==BKPNT_MEMORY) { DEBUG_ShowMsg("%02X. BPMEM %04X:%04X (%02X)\n",nr,bp->GetSegment(),bp->GetOffset(),bp->GetValue()); } else if (bp->GetType()==BKPNT_MEMORY_PROT) { DEBUG_ShowMsg("%02X. BPPM %04X:%08X (%02X)\n",nr,bp->GetSegment(),bp->GetOffset(),bp->GetValue()); } else if (bp->GetType()==BKPNT_MEMORY_LINEAR ) { DEBUG_ShowMsg("%02X. BPLM %08X (%02X)\n",nr,bp->GetOffset(),bp->GetValue()); }; nr++; } }; bool DEBUG_Breakpoint(void) { /* First get the phyiscal address and check for a set Breakpoint */ if (!CBreakpoint::CheckBreakpoint(SegValue(cs),reg_eip)) return false; // Found. Breakpoint is valid PhysPt where=GetAddress(SegValue(cs),reg_eip); CBreakpoint::ActivateBreakpoints(where,false); // Deactivate all breakpoints return true; }; bool DEBUG_IntBreakpoint(Bit8u intNum) { /* First get the phyiscal address and check for a set Breakpoint */ PhysPt where=GetAddress(SegValue(cs),reg_eip); if (!CBreakpoint::CheckIntBreakpoint(where,intNum,reg_ah)) return false; // Found. 
Breakpoint is valid CBreakpoint::ActivateBreakpoints(where,false); // Deactivate all breakpoints return true; }; static bool StepOver() { exitLoop = false; PhysPt start=GetAddress(SegValue(cs),reg_eip); char dline[200];Bitu size; size=DasmI386(dline, start, reg_eip, cpu.code.big); if (strstr(dline,"call") || strstr(dline,"int") || strstr(dline,"loop") || strstr(dline,"rep")) { CBreakpoint::AddBreakpoint (SegValue(cs),reg_eip+size, true); CBreakpoint::ActivateBreakpoints(start, true); debugging=false; DrawCode(); DOSBOX_SetNormalLoop(); return true; } return false; }; bool DEBUG_ExitLoop(void) { #if C_HEAVY_DEBUG DrawVariables(); #endif if (exitLoop) { exitLoop = false; return true; } return false; }; /********************/ /* Draw windows */ /********************/ static void DrawData(void) { Bit8u ch; Bit32u add = dataOfs; Bit32u address; /* Data win */ for (int y=0; y<8; y++) { // Address if (add<0x10000) mvwprintw (dbg.win_data,1+y,0,"%04X:%04X ",dataSeg,add); else mvwprintw (dbg.win_data,1+y,0,"%04X:%08X ",dataSeg,add); for (int x=0; x<16; x++) { address = GetAddress(dataSeg,add); if (mem_readb_checked(address,&ch)) ch=0; mvwprintw (dbg.win_data,1+y,14+3*x,"%02X",ch); if (ch<32 || !isprint(*reinterpret_cast<unsigned char*>(&ch))) ch='.'; mvwprintw (dbg.win_data,1+y,63+x,"%c",ch); add++; }; } wrefresh(dbg.win_data); }; static void DrawRegisters(void) { /* Main Registers */ SetColor(reg_eax!=oldregs.eax);oldregs.eax=reg_eax;mvwprintw (dbg.win_reg,0,4,"%08X",reg_eax); SetColor(reg_ebx!=oldregs.ebx);oldregs.ebx=reg_ebx;mvwprintw (dbg.win_reg,1,4,"%08X",reg_ebx); SetColor(reg_ecx!=oldregs.ecx);oldregs.ecx=reg_ecx;mvwprintw (dbg.win_reg,2,4,"%08X",reg_ecx); SetColor(reg_edx!=oldregs.edx);oldregs.edx=reg_edx;mvwprintw (dbg.win_reg,3,4,"%08X",reg_edx); SetColor(reg_esi!=oldregs.esi);oldregs.esi=reg_esi;mvwprintw (dbg.win_reg,0,18,"%08X",reg_esi); SetColor(reg_edi!=oldregs.edi);oldregs.edi=reg_edi;mvwprintw (dbg.win_reg,1,18,"%08X",reg_edi); SetColor(reg_ebp!=oldregs.ebp);oldregs.ebp=reg_ebp;mvwprintw (dbg.win_reg,2,18,"%08X",reg_ebp); SetColor(reg_esp!=oldregs.esp);oldregs.esp=reg_esp;mvwprintw (dbg.win_reg,3,18,"%08X",reg_esp); SetColor(reg_eip!=oldregs.eip);oldregs.eip=reg_eip;mvwprintw (dbg.win_reg,1,42,"%08X",reg_eip); SetColor(SegValue(ds)!=oldsegs[ds].val);oldsegs[ds].val=SegValue(ds);mvwprintw (dbg.win_reg,0,31,"%04X",SegValue(ds)); SetColor(SegValue(es)!=oldsegs[es].val);oldsegs[es].val=SegValue(es);mvwprintw (dbg.win_reg,0,41,"%04X",SegValue(es)); SetColor(SegValue(fs)!=oldsegs[fs].val);oldsegs[fs].val=SegValue(fs);mvwprintw (dbg.win_reg,0,51,"%04X",SegValue(fs)); SetColor(SegValue(gs)!=oldsegs[gs].val);oldsegs[gs].val=SegValue(gs);mvwprintw (dbg.win_reg,0,61,"%04X",SegValue(gs)); SetColor(SegValue(ss)!=oldsegs[ss].val);oldsegs[ss].val=SegValue(ss);mvwprintw (dbg.win_reg,0,71,"%04X",SegValue(ss)); SetColor(SegValue(cs)!=oldsegs[cs].val);oldsegs[cs].val=SegValue(cs);mvwprintw (dbg.win_reg,1,31,"%04X",SegValue(cs)); /*Individual flags*/ Bitu changed_flags = reg_flags ^ oldflags; oldflags = reg_flags; SetColor(changed_flags&FLAG_CF); mvwprintw (dbg.win_reg,1,53,"%01X",GETFLAG(CF) ? 1:0); SetColor(changed_flags&FLAG_ZF); mvwprintw (dbg.win_reg,1,56,"%01X",GETFLAG(ZF) ? 1:0); SetColor(changed_flags&FLAG_SF); mvwprintw (dbg.win_reg,1,59,"%01X",GETFLAG(SF) ? 1:0); SetColor(changed_flags&FLAG_OF); mvwprintw (dbg.win_reg,1,62,"%01X",GETFLAG(OF) ? 1:0); SetColor(changed_flags&FLAG_AF); mvwprintw (dbg.win_reg,1,65,"%01X",GETFLAG(AF) ? 
1:0); SetColor(changed_flags&FLAG_PF); mvwprintw (dbg.win_reg,1,68,"%01X",GETFLAG(PF) ? 1:0); SetColor(changed_flags&FLAG_DF); mvwprintw (dbg.win_reg,1,71,"%01X",GETFLAG(DF) ? 1:0); SetColor(changed_flags&FLAG_IF); mvwprintw (dbg.win_reg,1,74,"%01X",GETFLAG(IF) ? 1:0); SetColor(changed_flags&FLAG_TF); mvwprintw (dbg.win_reg,1,77,"%01X",GETFLAG(TF) ? 1:0); SetColor(changed_flags&FLAG_IOPL); mvwprintw (dbg.win_reg,2,72,"%01X",GETFLAG(IOPL)>>12); SetColor(cpu.cpl ^ oldcpucpl); mvwprintw (dbg.win_reg,2,78,"%01X",cpu.cpl); oldcpucpl=cpu.cpl; if (cpu.pmode) { if (reg_flags & FLAG_VM) mvwprintw(dbg.win_reg,0,76,"VM86"); else if (cpu.code.big) mvwprintw(dbg.win_reg,0,76,"Pr32"); else mvwprintw(dbg.win_reg,0,76,"Pr16"); } else mvwprintw(dbg.win_reg,0,76,"Real"); // Selector info, if available if ((cpu.pmode) && curSelectorName[0]) { char out1[200], out2[200]; GetDescriptorInfo(curSelectorName,out1,out2); mvwprintw(dbg.win_reg,2,28,out1); mvwprintw(dbg.win_reg,3,28,out2); } wattrset(dbg.win_reg,0); mvwprintw(dbg.win_reg,3,60,"%u ",cycle_count); wrefresh(dbg.win_reg); }; static void DrawCode(void) { bool saveSel; Bit32u disEIP = codeViewData.useEIP; PhysPt start = GetAddress(codeViewData.useCS,codeViewData.useEIP); char dline[200];Bitu size;Bitu c; static char line20[21] = " "; for (int i=0;i<10;i++) { saveSel = false; if (has_colors()) { if ((codeViewData.useCS==SegValue(cs)) && (disEIP == reg_eip)) { wattrset(dbg.win_code,COLOR_PAIR(PAIR_GREEN_BLACK)); if (codeViewData.cursorPos==-1) { codeViewData.cursorPos = i; // Set Cursor } if (i == codeViewData.cursorPos) { codeViewData.cursorSeg = SegValue(cs); codeViewData.cursorOfs = disEIP; } saveSel = (i == codeViewData.cursorPos); } else if (i == codeViewData.cursorPos) { wattrset(dbg.win_code,COLOR_PAIR(PAIR_BLACK_GREY)); codeViewData.cursorSeg = codeViewData.useCS; codeViewData.cursorOfs = disEIP; saveSel = true; } else if (CBreakpoint::IsBreakpointDrawn(start)) { wattrset(dbg.win_code,COLOR_PAIR(PAIR_GREY_RED)); } else { wattrset(dbg.win_code,0); } } Bitu drawsize=size=DasmI386(dline, start, disEIP, cpu.code.big); bool toolarge = false; mvwprintw(dbg.win_code,i,0,"%04X:%04X ",codeViewData.useCS,disEIP); if (drawsize>10) { toolarge = true; drawsize = 9; }; for (c=0;c<drawsize;c++) { Bit8u value; if (mem_readb_checked(start+c,&value)) value=0; wprintw(dbg.win_code,"%02X",value); } if (toolarge) { waddstr(dbg.win_code,".."); drawsize++; }; // Spacepad up to 20 characters if(drawsize && (drawsize < 11)) { line20[20 - drawsize*2] = 0; waddstr(dbg.win_code,line20); line20[20 - drawsize*2] = ' '; } else waddstr(dbg.win_code,line20); char empty_res[] = { 0 }; char* res = empty_res; if (showExtend) res = AnalyzeInstruction(dline, saveSel); // Spacepad it up to 28 characters size_t dline_len = strlen(dline); if(dline_len < 28) for (c = dline_len; c < 28;c++) dline[c] = ' '; dline[28] = 0; waddstr(dbg.win_code,dline); // Spacepad it up to 20 characters size_t res_len = strlen(res); if(res_len && (res_len < 21)) { waddstr(dbg.win_code,res); line20[20-res_len] = 0; waddstr(dbg.win_code,line20); line20[20-res_len] = ' '; } else waddstr(dbg.win_code,line20); start+=size; disEIP+=size; if (i==0) codeViewData.firstInstSize = size; if (i==4) codeViewData.useEIPmid = disEIP; } codeViewData.useEIPlast = disEIP; wattrset(dbg.win_code,0); if (!debugging) { mvwprintw(dbg.win_code,10,0,"%s","(Running)"); wclrtoeol(dbg.win_code); } else { //TODO long lines char* dispPtr = codeViewData.inputStr; char* curPtr = &codeViewData.inputStr[codeViewData.inputPos]; 
mvwprintw(dbg.win_code,10,0,"%c-> %s%c", (codeViewData.ovrMode?'O':'I'),dispPtr,(*curPtr?' ':'_')); wclrtoeol(dbg.win_code); // not correct in pdcurses if full line if (*curPtr) { mvwchgat(dbg.win_code,10,(curPtr-dispPtr+4),1,0,(PAIR_BLACK_GREY),NULL); } } wrefresh(dbg.win_code); } static void SetCodeWinStart() { if ((SegValue(cs)==codeViewData.useCS) && (reg_eip>=codeViewData.useEIP) && (reg_eip<=codeViewData.useEIPlast)) { // in valid window - scroll ? if (reg_eip>=codeViewData.useEIPmid) codeViewData.useEIP += codeViewData.firstInstSize; } else { // totally out of range. codeViewData.useCS = SegValue(cs); codeViewData.useEIP = reg_eip; } codeViewData.cursorPos = -1; // Recalc Cursor position }; /********************/ /* User input */ /********************/ Bit32u GetHexValue(char* str, char*& hex) { Bit32u value = 0; Bit32u regval = 0; hex = str; while (*hex==' ') hex++; if (strstr(hex,"EAX")==hex) { hex+=3; regval = reg_eax; }; if (strstr(hex,"EBX")==hex) { hex+=3; regval = reg_ebx; }; if (strstr(hex,"ECX")==hex) { hex+=3; regval = reg_ecx; }; if (strstr(hex,"EDX")==hex) { hex+=3; regval = reg_edx; }; if (strstr(hex,"ESI")==hex) { hex+=3; regval = reg_esi; }; if (strstr(hex,"EDI")==hex) { hex+=3; regval = reg_edi; }; if (strstr(hex,"EBP")==hex) { hex+=3; regval = reg_ebp; }; if (strstr(hex,"ESP")==hex) { hex+=3; regval = reg_esp; }; if (strstr(hex,"EIP")==hex) { hex+=3; regval = reg_eip; }; if (strstr(hex,"AX")==hex) { hex+=2; regval = reg_ax; }; if (strstr(hex,"BX")==hex) { hex+=2; regval = reg_bx; }; if (strstr(hex,"CX")==hex) { hex+=2; regval = reg_cx; }; if (strstr(hex,"DX")==hex) { hex+=2; regval = reg_dx; }; if (strstr(hex,"SI")==hex) { hex+=2; regval = reg_si; }; if (strstr(hex,"DI")==hex) { hex+=2; regval = reg_di; }; if (strstr(hex,"BP")==hex) { hex+=2; regval = reg_bp; }; if (strstr(hex,"SP")==hex) { hex+=2; regval = reg_sp; }; if (strstr(hex,"IP")==hex) { hex+=2; regval = reg_ip; }; if (strstr(hex,"CS")==hex) { hex+=2; regval = SegValue(cs); }; if (strstr(hex,"DS")==hex) { hex+=2; regval = SegValue(ds); }; if (strstr(hex,"ES")==hex) { hex+=2; regval = SegValue(es); }; if (strstr(hex,"FS")==hex) { hex+=2; regval = SegValue(fs); }; if (strstr(hex,"GS")==hex) { hex+=2; regval = SegValue(gs); }; if (strstr(hex,"SS")==hex) { hex+=2; regval = SegValue(ss); }; while (*hex) { if ((*hex>='0') && (*hex<='9')) value = (value<<4)+*hex-'0'; else if ((*hex>='A') && (*hex<='F')) value = (value<<4)+*hex-'A'+10; else { if(*hex == '+') {hex++;return regval + value + GetHexValue(hex,hex); }; if(*hex == '-') {hex++;return regval + value - GetHexValue(hex,hex); }; break; // No valid char } hex++; }; return regval + value; }; bool ChangeRegister(char* str) { char* hex = str; while (*hex==' ') hex++; if (strstr(hex,"EAX")==hex) { hex+=3; reg_eax = GetHexValue(hex,hex); } else if (strstr(hex,"EBX")==hex) { hex+=3; reg_ebx = GetHexValue(hex,hex); } else if (strstr(hex,"ECX")==hex) { hex+=3; reg_ecx = GetHexValue(hex,hex); } else if (strstr(hex,"EDX")==hex) { hex+=3; reg_edx = GetHexValue(hex,hex); } else if (strstr(hex,"ESI")==hex) { hex+=3; reg_esi = GetHexValue(hex,hex); } else if (strstr(hex,"EDI")==hex) { hex+=3; reg_edi = GetHexValue(hex,hex); } else if (strstr(hex,"EBP")==hex) { hex+=3; reg_ebp = GetHexValue(hex,hex); } else if (strstr(hex,"ESP")==hex) { hex+=3; reg_esp = GetHexValue(hex,hex); } else if (strstr(hex,"EIP")==hex) { hex+=3; reg_eip = GetHexValue(hex,hex); } else if (strstr(hex,"AX")==hex) { hex+=2; reg_ax = (Bit16u)GetHexValue(hex,hex); } else if (strstr(hex,"BX")==hex) { hex+=2; 
reg_bx = (Bit16u)GetHexValue(hex,hex); } else if (strstr(hex,"CX")==hex) { hex+=2; reg_cx = (Bit16u)GetHexValue(hex,hex); } else if (strstr(hex,"DX")==hex) { hex+=2; reg_dx = (Bit16u)GetHexValue(hex,hex); } else if (strstr(hex,"SI")==hex) { hex+=2; reg_si = (Bit16u)GetHexValue(hex,hex); } else if (strstr(hex,"DI")==hex) { hex+=2; reg_di = (Bit16u)GetHexValue(hex,hex); } else if (strstr(hex,"BP")==hex) { hex+=2; reg_bp = (Bit16u)GetHexValue(hex,hex); } else if (strstr(hex,"SP")==hex) { hex+=2; reg_sp = (Bit16u)GetHexValue(hex,hex); } else if (strstr(hex,"IP")==hex) { hex+=2; reg_ip = (Bit16u)GetHexValue(hex,hex); } else if (strstr(hex,"CS")==hex) { hex+=2; SegSet16(cs,(Bit16u)GetHexValue(hex,hex)); } else if (strstr(hex,"DS")==hex) { hex+=2; SegSet16(ds,(Bit16u)GetHexValue(hex,hex)); } else if (strstr(hex,"ES")==hex) { hex+=2; SegSet16(es,(Bit16u)GetHexValue(hex,hex)); } else if (strstr(hex,"FS")==hex) { hex+=2; SegSet16(fs,(Bit16u)GetHexValue(hex,hex)); } else if (strstr(hex,"GS")==hex) { hex+=2; SegSet16(gs,(Bit16u)GetHexValue(hex,hex)); } else if (strstr(hex,"SS")==hex) { hex+=2; SegSet16(ss,(Bit16u)GetHexValue(hex,hex)); } else if (strstr(hex,"AF")==hex) { hex+=2; SETFLAGBIT(AF,GetHexValue(hex,hex)); } else if (strstr(hex,"CF")==hex) { hex+=2; SETFLAGBIT(CF,GetHexValue(hex,hex)); } else if (strstr(hex,"DF")==hex) { hex+=2; SETFLAGBIT(DF,GetHexValue(hex,hex)); } else if (strstr(hex,"IF")==hex) { hex+=2; SETFLAGBIT(IF,GetHexValue(hex,hex)); } else if (strstr(hex,"OF")==hex) { hex+=2; SETFLAGBIT(OF,GetHexValue(hex,hex)); } else if (strstr(hex,"ZF")==hex) { hex+=2; SETFLAGBIT(ZF,GetHexValue(hex,hex)); } else if (strstr(hex,"PF")==hex) { hex+=2; SETFLAGBIT(PF,GetHexValue(hex,hex)); } else if (strstr(hex,"SF")==hex) { hex+=2; SETFLAGBIT(SF,GetHexValue(hex,hex)); } else { return false; }; return true; }; bool ParseCommand(char* str) { char* found = str; for(char* idx = found;*idx != 0; idx++) *idx = toupper(*idx); found = trim(found); string s_found(found); istringstream stream(s_found); string command; stream >> command; string::size_type next = s_found.find_first_not_of(' ',command.size()); if(next == string::npos) next = command.size(); (s_found.erase)(0,next); found = const_cast<char*>(s_found.c_str()); if (command == "MEMDUMP") { // Dump memory to file Bit16u seg = (Bit16u)GetHexValue(found,found); found++; Bit32u ofs = GetHexValue(found,found); found++; Bit32u num = GetHexValue(found,found); found++; SaveMemory(seg,ofs,num); return true; }; if (command == "MEMDUMPBIN") { // Dump memory to file bineary Bit16u seg = (Bit16u)GetHexValue(found,found); found++; Bit32u ofs = GetHexValue(found,found); found++; Bit32u num = GetHexValue(found,found); found++; SaveMemoryBin(seg,ofs,num); return true; }; if (command == "IV") { // Insert variable Bit16u seg = (Bit16u)GetHexValue(found,found); found++; Bit32u ofs = (Bit16u)GetHexValue(found,found); found++; char name[16]; for (int i=0; i<16; i++) { if (found[i] && (found[i]!=' ')) name[i] = found[i]; else { name[i] = 0; break; }; }; name[15] = 0; if(!name[0]) return false; DEBUG_ShowMsg("DEBUG: Created debug var %s at %04X:%04X\n",name,seg,ofs); CDebugVar::InsertVariable(name,GetAddress(seg,ofs)); return true; }; if (command == "SV") { // Save variables char name[13]; for (int i=0; i<12; i++) { if (found[i] && (found[i]!=' ')) name[i] = found[i]; else { name[i] = 0; break; }; }; name[12] = 0; if(!name[0]) return false; DEBUG_ShowMsg("DEBUG: Variable list save (%s) : %s.\n",name,(CDebugVar::SaveVars(name)?"ok":"failure")); return true; }; if (command 
== "LV") { // load variables char name[13]; for (int i=0; i<12; i++) { if (found[i] && (found[i]!=' ')) name[i] = found[i]; else { name[i] = 0; break; }; }; name[12] = 0; if(!name[0]) return false; DEBUG_ShowMsg("DEBUG: Variable list load (%s) : %s.\n",name,(CDebugVar::LoadVars(name)?"ok":"failure")); return true; }; if (command == "SR") { // Set register value DEBUG_ShowMsg("DEBUG: Set Register %s.\n",(ChangeRegister(found)?"success":"failure")); return true; }; if (command == "SM") { // Set memory with following values Bit16u seg = (Bit16u)GetHexValue(found,found); found++; Bit32u ofs = GetHexValue(found,found); found++; Bit16u count = 0; while (*found) { while (*found==' ') found++; if (*found) { Bit8u value = (Bit8u)GetHexValue(found,found); if(*found) found++; mem_writeb_checked(GetAddress(seg,ofs+count),value); count++; } }; DEBUG_ShowMsg("DEBUG: Memory changed.\n"); return true; }; if (command == "BP") { // Add new breakpoint Bit16u seg = (Bit16u)GetHexValue(found,found);found++; // skip ":" Bit32u ofs = GetHexValue(found,found); CBreakpoint::AddBreakpoint(seg,ofs,false); DEBUG_ShowMsg("DEBUG: Set breakpoint at %04X:%04X\n",seg,ofs); return true; }; #if C_HEAVY_DEBUG if (command == "BPM") { // Add new breakpoint Bit16u seg = (Bit16u)GetHexValue(found,found);found++; // skip ":" Bit32u ofs = GetHexValue(found,found); CBreakpoint::AddMemBreakpoint(seg,ofs); DEBUG_ShowMsg("DEBUG: Set memory breakpoint at %04X:%04X\n",seg,ofs); return true; }; if (command == "BPPM") { // Add new breakpoint Bit16u seg = (Bit16u)GetHexValue(found,found);found++; // skip ":" Bit32u ofs = GetHexValue(found,found); CBreakpoint* bp = CBreakpoint::AddMemBreakpoint(seg,ofs); if (bp) { bp->SetType(BKPNT_MEMORY_PROT); DEBUG_ShowMsg("DEBUG: Set prot-mode memory breakpoint at %04X:%08X\n",seg,ofs); } return true; }; if (command == "BPLM") { // Add new breakpoint Bit32u ofs = GetHexValue(found,found); CBreakpoint* bp = CBreakpoint::AddMemBreakpoint(0,ofs); if (bp) bp->SetType(BKPNT_MEMORY_LINEAR); DEBUG_ShowMsg("DEBUG: Set linear memory breakpoint at %08X\n",ofs); return true; }; #endif if (command == "BPINT") { // Add Interrupt Breakpoint Bit8u intNr = (Bit8u)GetHexValue(found,found); bool all = !(*found);found++; Bit8u valAH = (Bit8u)GetHexValue(found,found); if ((valAH==0x00) && (*found=='*' || all)) { CBreakpoint::AddIntBreakpoint(intNr,BPINT_ALL,false); DEBUG_ShowMsg("DEBUG: Set interrupt breakpoint at INT %02X\n",intNr); } else { CBreakpoint::AddIntBreakpoint(intNr,valAH,false); DEBUG_ShowMsg("DEBUG: Set interrupt breakpoint at INT %02X AH=%02X\n",intNr,valAH); } return true; }; if (command == "BPLIST") { DEBUG_ShowMsg("Breakpoint list:\n"); DEBUG_ShowMsg("-------------------------------------------------------------------------\n"); CBreakpoint::ShowList(); return true; }; if (command == "BPDEL") { // Delete Breakpoints Bit8u bpNr = (Bit8u)GetHexValue(found,found); if ((bpNr==0x00) && (*found=='*')) { // Delete all CBreakpoint::DeleteAll(); DEBUG_ShowMsg("DEBUG: Breakpoints deleted.\n"); } else { // delete single breakpoint DEBUG_ShowMsg("DEBUG: Breakpoint deletion %s.\n",(CBreakpoint::DeleteByIndex(bpNr)?"success":"failure")); } return true; }; if (command == "C") { // Set code overview Bit16u codeSeg = (Bit16u)GetHexValue(found,found); found++; Bit32u codeOfs = GetHexValue(found,found); DEBUG_ShowMsg("DEBUG: Set code overview to %04X:%04X\n",codeSeg,codeOfs); codeViewData.useCS = codeSeg; codeViewData.useEIP = codeOfs; codeViewData.cursorPos = 0; return true; }; if (command == "D") { // Set data overview 
dataSeg = (Bit16u)GetHexValue(found,found); found++; dataOfs = GetHexValue(found,found); DEBUG_ShowMsg("DEBUG: Set data overview to %04X:%04X\n",dataSeg,dataOfs); return true; }; #if C_HEAVY_DEBUG if (command == "LOG") { // Create Cpu normal log file cpuLogType = 1; command = "logcode"; } if (command == "LOGS") { // Create Cpu short log file cpuLogType = 0; command = "logcode"; } if (command == "LOGL") { // Create Cpu long log file cpuLogType = 2; command = "logcode"; } if (command == "logcode") { //Shared code between all logs DEBUG_ShowMsg("DEBUG: Starting log\n"); cpuLogFile.open("LOGCPU.TXT"); if (!cpuLogFile.is_open()) { DEBUG_ShowMsg("DEBUG: Logfile couldn't be created.\n"); return false; } //Initialize log object cpuLogFile << hex << noshowbase << setfill('0') << uppercase; cpuLog = true; cpuLogCounter = GetHexValue(found,found); debugging = false; CBreakpoint::ActivateBreakpoints(SegPhys(cs)+reg_eip,true); ignoreAddressOnce = SegPhys(cs)+reg_eip; DOSBOX_SetNormalLoop(); return true; }; #endif if (command == "INTT") { //trace int. Bit8u intNr = (Bit8u)GetHexValue(found,found); DEBUG_ShowMsg("DEBUG: Tracing INT %02X\n",intNr); CPU_HW_Interrupt(intNr); SetCodeWinStart(); return true; }; if (command == "INT") { // start int. Bit8u intNr = (Bit8u)GetHexValue(found,found); DEBUG_ShowMsg("DEBUG: Starting INT %02X\n",intNr); CBreakpoint::AddBreakpoint(SegValue(cs),reg_eip, true); CBreakpoint::ActivateBreakpoints(SegPhys(cs)+reg_eip-1,true); debugging = false; DrawCode(); DOSBOX_SetNormalLoop(); CPU_HW_Interrupt(intNr); return true; }; if (command == "SELINFO") { while (found[0] == ' ') found++; char out1[200],out2[200]; GetDescriptorInfo(found,out1,out2); DEBUG_ShowMsg("SelectorInfo %s:\n%s\n%s\n",found,out1,out2); return true; }; if (command == "DOS") { stream >> command; if (command == "MCBS") LogMCBS(); return true; } if (command == "GDT") {LogGDT(); return true;} if (command == "LDT") {LogLDT(); return true;} if (command == "IDT") {LogIDT(); return true;} if (command == "PAGING") {LogPages(found); return true;} if (command == "CPU") {LogCPUInfo(); return true;} if (command == "INTVEC") { if (found[0] != 0) { OutputVecTable(found); return true; } }; if (command == "INTHAND") { if (found[0] != 0) { Bit8u intNr = (Bit8u)GetHexValue(found,found); DEBUG_ShowMsg("DEBUG: Set code overview to interrupt handler %X\n",intNr); codeViewData.useCS = mem_readw(intNr*4+2); codeViewData.useEIP = mem_readw(intNr*4); codeViewData.cursorPos = 0; return true; } }; if(command == "EXTEND") { //Toggle additional data. 
showExtend = !showExtend; return true; }; if(command == "TIMERIRQ") { //Start a timer irq DEBUG_RaiseTimerIrq(); DEBUG_ShowMsg("Debug: Timer Int started.\n"); return true; }; #if C_HEAVY_DEBUG if (command == "HEAVYLOG") { // Create Cpu log file logHeavy = !logHeavy; DEBUG_ShowMsg("DEBUG: Heavy cpu logging %s.\n",logHeavy?"on":"off"); return true; }; if (command == "ZEROPROTECT") { //toggle zero protection zeroProtect = !zeroProtect; DEBUG_ShowMsg("DEBUG: Zero code execution protection %s.\n",zeroProtect?"on":"off"); return true; }; #endif if (command == "HELP" || command == "?") { DEBUG_ShowMsg("Debugger commands (enter all values in hex or as register):\n"); DEBUG_ShowMsg("--------------------------------------------------------------------------\n"); DEBUG_ShowMsg("F3/F6 - Previous command in history.\n"); DEBUG_ShowMsg("F4/F7 - Next command in history.\n"); DEBUG_ShowMsg("F5 - Run.\n"); DEBUG_ShowMsg("F9 - Set/Remove breakpoint.\n"); DEBUG_ShowMsg("F10/F11 - Step over / trace into instruction.\n"); DEBUG_ShowMsg("ALT + D/E/S/X/B - Set data view to DS:SI/ES:DI/SS:SP/DS:DX/ES:BX.\n"); DEBUG_ShowMsg("Escape - Clear input line."); DEBUG_ShowMsg("Up/Down - Move code view cursor.\n"); DEBUG_ShowMsg("Page Up/Down - Scroll data view.\n"); DEBUG_ShowMsg("Home/End - Scroll log messages.\n"); DEBUG_ShowMsg("BP [segment]:[offset] - Set breakpoint.\n"); DEBUG_ShowMsg("BPINT [intNr] * - Set interrupt breakpoint.\n"); DEBUG_ShowMsg("BPINT [intNr] [ah] - Set interrupt breakpoint with ah.\n"); #if C_HEAVY_DEBUG DEBUG_ShowMsg("BPM [segment]:[offset] - Set memory breakpoint (memory change).\n"); DEBUG_ShowMsg("BPPM [selector]:[offset]- Set pmode-memory breakpoint (memory change).\n"); DEBUG_ShowMsg("BPLM [linear address] - Set linear memory breakpoint (memory change).\n"); #endif DEBUG_ShowMsg("BPLIST - List breakpoints.\n"); DEBUG_ShowMsg("BPDEL [bpNr] / * - Delete breakpoint nr / all.\n"); DEBUG_ShowMsg("C / D [segment]:[offset] - Set code / data view address.\n"); DEBUG_ShowMsg("DOS MCBS - Show Memory Control Block chain.\n"); DEBUG_ShowMsg("INT [nr] / INTT [nr] - Execute / Trace into interrupt.\n"); #if C_HEAVY_DEBUG DEBUG_ShowMsg("LOG [num] - Write cpu log file.\n"); DEBUG_ShowMsg("LOGS/LOGL [num] - Write short/long cpu log file.\n"); DEBUG_ShowMsg("HEAVYLOG - Enable/Disable automatic cpu log when dosbox exits.\n"); DEBUG_ShowMsg("ZEROPROTECT - Enable/Disable zero code execution detecion.\n"); #endif DEBUG_ShowMsg("SR [reg] [value] - Set register value.\n"); DEBUG_ShowMsg("SM [seg]:[off] [val] [.]..- Set memory with following values.\n"); DEBUG_ShowMsg("IV [seg]:[off] [name] - Create var name for memory address.\n"); DEBUG_ShowMsg("SV [filename] - Save var list in file.\n"); DEBUG_ShowMsg("LV [filename] - Load var list from file.\n"); DEBUG_ShowMsg("MEMDUMP [seg]:[off] [len] - Write memory to file memdump.txt.\n"); DEBUG_ShowMsg("MEMDUMPBIN [s]:[o] [len] - Write memory to file memdump.bin.\n"); DEBUG_ShowMsg("SELINFO [segName] - Show selector info.\n"); DEBUG_ShowMsg("INTVEC [filename] - Writes interrupt vector table to file.\n"); DEBUG_ShowMsg("INTHAND [intNum] - Set code view to interrupt handler.\n"); DEBUG_ShowMsg("CPU - Display CPU status information.\n"); DEBUG_ShowMsg("GDT - Lists descriptors of the GDT.\n"); DEBUG_ShowMsg("LDT - Lists descriptors of the LDT.\n"); DEBUG_ShowMsg("IDT - Lists descriptors of the IDT.\n"); DEBUG_ShowMsg("PAGING [page] - Display content of page table.\n"); DEBUG_ShowMsg("EXTEND - Toggle additional info.\n"); DEBUG_ShowMsg("TIMERIRQ - Run the system timer.\n"); 
DEBUG_ShowMsg("HELP - Help\n"); return true; }; return false; }; char* AnalyzeInstruction(char* inst, bool saveSelector) { static char result[256]; char instu[256]; char prefix[3]; Bit16u seg; strcpy(instu,inst); upcase(instu); result[0] = 0; char* pos = strchr(instu,'['); if (pos) { // Segment prefix ? if (*(pos-1)==':') { char* segpos = pos-3; prefix[0] = tolower(*segpos); prefix[1] = tolower(*(segpos+1)); prefix[2] = 0; seg = (Bit16u)GetHexValue(segpos,segpos); } else { if (strstr(pos,"SP") || strstr(pos,"BP")) { seg = SegValue(ss); strcpy(prefix,"ss"); } else { seg = SegValue(ds); strcpy(prefix,"ds"); }; }; pos++; Bit32u adr = GetHexValue(pos,pos); while (*pos!=']') { if (*pos=='+') { pos++; adr += GetHexValue(pos,pos); } else if (*pos=='-') { pos++; adr -= GetHexValue(pos,pos); } else pos++; }; Bit32u address = GetAddress(seg,adr); if (!(get_tlb_readhandler(address)->flags & PFLAG_INIT)) { static char outmask[] = "%s:[%04X]=%02X"; if (cpu.pmode) outmask[6] = '8'; switch (DasmLastOperandSize()) { case 8 : { Bit8u val = mem_readb(address); outmask[12] = '2'; sprintf(result,outmask,prefix,adr,val); } break; case 16: { Bit16u val = mem_readw(address); outmask[12] = '4'; sprintf(result,outmask,prefix,adr,val); } break; case 32: { Bit32u val = mem_readd(address); outmask[12] = '8'; sprintf(result,outmask,prefix,adr,val); } break; } } else { sprintf(result,"[illegal]"); } // Variable found ? CDebugVar* var = CDebugVar::FindVar(address); if (var) { // Replace occurence char* pos1 = strchr(inst,'['); char* pos2 = strchr(inst,']'); if (pos1 && pos2) { char temp[256]; strcpy(temp,pos2); // save end pos1++; *pos1 = 0; // cut after '[' strcat(inst,var->GetName()); // add var name strcat(inst,temp); // add end }; }; // show descriptor info, if available if ((cpu.pmode) && saveSelector) { strcpy(curSelectorName,prefix); }; }; // If it is a callback add additional info pos = strstr(inst,"callback"); if (pos) { pos += 9; Bitu nr = GetHexValue(pos,pos); const char* descr = CALLBACK_GetDescription(nr); if (descr) { strcat(inst," ("); strcat(inst,descr); strcat(inst,")"); } }; // Must be a jump if (instu[0] == 'J') { bool jmp = false; switch (instu[1]) { case 'A' : { jmp = (get_CF()?false:true) && (get_ZF()?false:true); // JA } break; case 'B' : { if (instu[2] == 'E') { jmp = (get_CF()?true:false) || (get_ZF()?true:false); // JBE } else { jmp = get_CF()?true:false; // JB } } break; case 'C' : { if (instu[2] == 'X') { jmp = reg_cx == 0; // JCXZ } else { jmp = get_CF()?true:false; // JC } } break; case 'E' : { jmp = get_ZF()?true:false; // JE } break; case 'G' : { if (instu[2] == 'E') { jmp = (get_SF()?true:false)==(get_OF()?true:false); // JGE } else { jmp = (get_ZF()?false:true) && ((get_SF()?true:false)==(get_OF()?true:false)); // JG } } break; case 'L' : { if (instu[2] == 'E') { jmp = (get_ZF()?true:false) || ((get_SF()?true:false)!=(get_OF()?true:false)); // JLE } else { jmp = (get_SF()?true:false)!=(get_OF()?true:false); // JL } } break; case 'M' : { jmp = true; // JMP } break; case 'N' : { switch (instu[2]) { case 'B' : case 'C' : { jmp = get_CF()?false:true; // JNB / JNC } break; case 'E' : { jmp = get_ZF()?false:true; // JNE } break; case 'O' : { jmp = get_OF()?false:true; // JNO } break; case 'P' : { jmp = get_PF()?false:true; // JNP } break; case 'S' : { jmp = get_SF()?false:true; // JNS } break; case 'Z' : { jmp = get_ZF()?false:true; // JNZ } break; } } break; case 'O' : { jmp = get_OF()?true:false; // JO } break; case 'P' : { if (instu[2] == 'O') { jmp = get_PF()?false:true; // JPO } else { 
jmp = get_SF()?true:false; // JP / JPE } } break; case 'S' : { jmp = get_SF()?true:false; // JS } break; case 'Z' : { jmp = get_ZF()?true:false; // JZ } break; } if (jmp) { pos = strchr(instu,'$'); if (pos) { pos = strchr(instu,'+'); if (pos) { strcpy(result,"(down)"); } else { strcpy(result,"(up)"); } } } else { sprintf(result,"(no jmp)"); } } return result; }; Bit32u DEBUG_CheckKeys(void) { Bits ret=0; int key=getch(); if (key>0) { #if defined(WIN32) && defined(__PDCURSES__) switch (key) { case PADENTER: key=0x0A; break; case PADSLASH: key='/'; break; case PADSTAR: key='*'; break; case PADMINUS: key='-'; break; case PADPLUS: key='+'; break; case ALT_D: if (ungetch('D') != ERR) key=27; break; case ALT_E: if (ungetch('E') != ERR) key=27; break; case ALT_X: if (ungetch('X') != ERR) key=27; break; case ALT_B: if (ungetch('B') != ERR) key=27; break; case ALT_S: if (ungetch('S') != ERR) key=27; break; } #endif switch (toupper(key)) { case 27: // escape (a bit slow): Clears line. and processes alt commands. key=getch(); if(key < 0) { //Purely escape Clear line ClearInputLine(); break; } switch(toupper(key)) { case 'D' : // ALT - D: DS:SI dataSeg = SegValue(ds); if (cpu.pmode && !(reg_flags & FLAG_VM)) dataOfs = reg_esi; else dataOfs = reg_si; break; case 'E' : //ALT - E: es:di dataSeg = SegValue(es); if (cpu.pmode && !(reg_flags & FLAG_VM)) dataOfs = reg_edi; else dataOfs = reg_di; break; case 'X': //ALT - X: ds:dx dataSeg = SegValue(ds); if (cpu.pmode && !(reg_flags & FLAG_VM)) dataOfs = reg_edx; else dataOfs = reg_dx; break; case 'B' : //ALT -B: es:bx dataSeg = SegValue(es); if (cpu.pmode && !(reg_flags & FLAG_VM)) dataOfs = reg_ebx; else dataOfs = reg_bx; break; case 'S': //ALT - S: ss:sp dataSeg = SegValue(ss); if (cpu.pmode && !(reg_flags & FLAG_VM)) dataOfs = reg_esp; else dataOfs = reg_sp; break; default: break; } break; case KEY_PPAGE : dataOfs -= 16; break; case KEY_NPAGE : dataOfs += 16; break; case KEY_DOWN: // down if (codeViewData.cursorPos<9) codeViewData.cursorPos++; else codeViewData.useEIP += codeViewData.firstInstSize; break; case KEY_UP: // up if (codeViewData.cursorPos>0) codeViewData.cursorPos--; else { Bitu bytes = 0; char dline[200]; Bitu size = 0; Bit32u newEIP = codeViewData.useEIP - 1; if(codeViewData.useEIP) { for (; bytes < 10; bytes++) { PhysPt start = GetAddress(codeViewData.useCS,newEIP); size = DasmI386(dline, start, newEIP, cpu.code.big); if(codeViewData.useEIP == newEIP+size) break; newEIP--; } if (bytes>=10) newEIP = codeViewData.useEIP - 1; } codeViewData.useEIP = newEIP; } break; case KEY_HOME: // Home: scroll log page up DEBUG_RefreshPage(-1); break; case KEY_END: // End: scroll log page down DEBUG_RefreshPage(1); break; case KEY_IC: // Insert: toggle insert/overwrite codeViewData.ovrMode = !codeViewData.ovrMode; break; case KEY_LEFT: // move to the left in command line if (codeViewData.inputPos > 0) codeViewData.inputPos--; break; case KEY_RIGHT: // move to the right in command line if (codeViewData.inputStr[codeViewData.inputPos]) codeViewData.inputPos++; break; case KEY_F(6): // previous command (f1-f4 generate rubbish at my place) case KEY_F(3): // previous command if (histBuffPos == histBuff.begin()) break; if (histBuffPos == histBuff.end()) { // copy inputStr to suspInputStr so we can restore it safe_strncpy(codeViewData.suspInputStr, codeViewData.inputStr, sizeof(codeViewData.suspInputStr)); } safe_strncpy(codeViewData.inputStr,(*--histBuffPos).c_str(),sizeof(codeViewData.inputStr)); codeViewData.inputPos = strlen(codeViewData.inputStr); break; case 
KEY_F(7): // next command (f1-f4 generate rubbish at my place) case KEY_F(4): // next command if (histBuffPos == histBuff.end()) break; if (++histBuffPos != histBuff.end()) { safe_strncpy(codeViewData.inputStr,(*histBuffPos).c_str(),sizeof(codeViewData.inputStr)); } else { // copy suspInputStr back into inputStr safe_strncpy(codeViewData.inputStr, codeViewData.suspInputStr, sizeof(codeViewData.inputStr)); } codeViewData.inputPos = strlen(codeViewData.inputStr); break; case KEY_F(5): // Run Program debugging=false; CBreakpoint::ActivateBreakpoints(SegPhys(cs)+reg_eip,true); ignoreAddressOnce = SegPhys(cs)+reg_eip; DOSBOX_SetNormalLoop(); break; case KEY_F(9): // Set/Remove Breakpoint { PhysPt ptr = GetAddress(codeViewData.cursorSeg,codeViewData.cursorOfs); if (CBreakpoint::IsBreakpoint(ptr)) { CBreakpoint::DeleteBreakpoint(ptr); DEBUG_ShowMsg("DEBUG: Breakpoint deletion success.\n"); } else { CBreakpoint::AddBreakpoint(codeViewData.cursorSeg, codeViewData.cursorOfs, false); DEBUG_ShowMsg("DEBUG: Set breakpoint at %04X:%04X\n",codeViewData.cursorSeg,codeViewData.cursorOfs); } } break; case KEY_F(10): // Step over inst if (StepOver()) return 0; else { exitLoop = false; skipFirstInstruction = true; // for heavy debugger CPU_Cycles = 1; ret=(*cpudecoder)(); SetCodeWinStart(); CBreakpoint::ignoreOnce = 0; } break; case KEY_F(11): // trace into exitLoop = false; skipFirstInstruction = true; // for heavy debugger CPU_Cycles = 1; ret = (*cpudecoder)(); SetCodeWinStart(); CBreakpoint::ignoreOnce = 0; break; case 0x0A: //Parse typed Command codeViewData.inputStr[MAXCMDLEN] = '\0'; if(ParseCommand(codeViewData.inputStr)) { char* cmd = ltrim(codeViewData.inputStr); if (histBuff.empty() || *--histBuff.end()!=cmd) histBuff.push_back(cmd); if (histBuff.size() > MAX_HIST_BUFFER) histBuff.pop_front(); histBuffPos = histBuff.end(); ClearInputLine(); } else { codeViewData.inputPos = strlen(codeViewData.inputStr); } break; case KEY_BACKSPACE: //backspace (linux) case 0x7f: // backspace in some terminal emulators (linux) case 0x08: // delete if (codeViewData.inputPos == 0) break; codeViewData.inputPos--; // fallthrough case KEY_DC: // delete character if ((codeViewData.inputPos<0) || (codeViewData.inputPos>=MAXCMDLEN)) break; if (codeViewData.inputStr[codeViewData.inputPos] != 0) { codeViewData.inputStr[MAXCMDLEN] = '\0'; for(char* p=&codeViewData.inputStr[codeViewData.inputPos];(*p=*(p+1));p++) {} } break; default: if ((key>=32) && (key<127)) { if ((codeViewData.inputPos<0) || (codeViewData.inputPos>=MAXCMDLEN)) break; codeViewData.inputStr[MAXCMDLEN] = '\0'; if (codeViewData.inputStr[codeViewData.inputPos] == 0) { codeViewData.inputStr[codeViewData.inputPos++] = char(key); codeViewData.inputStr[codeViewData.inputPos] = '\0'; } else if (!codeViewData.ovrMode) { int len = (int) strlen(codeViewData.inputStr); if (len < MAXCMDLEN) { for(len++;len>codeViewData.inputPos;len--) codeViewData.inputStr[len]=codeViewData.inputStr[len-1]; codeViewData.inputStr[codeViewData.inputPos++] = char(key); } } else { codeViewData.inputStr[codeViewData.inputPos++] = char(key); } } else if (key==killchar()) { ClearInputLine(); } break; } if (ret<0) return ret; if (ret>0) { if (GCC_UNLIKELY(ret >= CB_MAX)) ret = 0; else ret = (*CallBack_Handlers[ret])(); if (ret) { exitLoop=true; CPU_Cycles=CPU_CycleLeft=0; return ret; } } ret=0; DEBUG_DrawScreen(); } return ret; }; Bitu DEBUG_Loop(void) { //TODO Disable sound GFX_Events(); // Interrupt started ? 
Bitu DEBUG_Loop(void) {
//TODO Disable sound
	GFX_Events();
	// Interrupt started ? - then skip it
	Bit16u oldCS = SegValue(cs);
	Bit32u oldEIP = reg_eip;
	PIC_runIRQs();
	SDL_Delay(1);
	if ((oldCS!=SegValue(cs)) || (oldEIP!=reg_eip)) {
		CBreakpoint::AddBreakpoint(oldCS,oldEIP,true);
		CBreakpoint::ActivateBreakpoints(SegPhys(cs)+reg_eip,true);
		debugging=false;
		DOSBOX_SetNormalLoop();
		return 0;
	}
	return DEBUG_CheckKeys();
}

void DEBUG_Enable(bool pressed) {
	if (!pressed) return;
	static bool showhelp=false;
	debugging=true;
	SetCodeWinStart();
	DEBUG_DrawScreen();
	DOSBOX_SetLoop(&DEBUG_Loop);
	if(!showhelp) {
		showhelp=true;
		DEBUG_ShowMsg("***| TYPE HELP (+ENTER) TO GET AN OVERVIEW OF ALL COMMANDS |***\n");
	}
	KEYBOARD_ClrBuffer();
}

void DEBUG_DrawScreen(void) {
	DrawData();
	DrawCode();
	DrawRegisters();
	DrawVariables();
}

static void DEBUG_RaiseTimerIrq(void) {
	PIC_ActivateIRQ(0);
}

// Display the content of the MCB chain starting with the MCB at the specified segment.
static void LogMCBChain(Bit16u mcb_segment) {
	DOS_MCB mcb(mcb_segment);
	char filename[9]; // 8 characters plus a terminating NUL
	const char *psp_seg_note;
	PhysPt dataAddr = PhysMake(dataSeg,dataOfs);// location being viewed in the "Data Overview"

	// loop forever, breaking out of the loop once we've processed the last MCB
	while (true) {
		// verify that the type field is valid
		if (mcb.GetType()!=0x4d && mcb.GetType()!=0x5a) {
			LOG(LOG_MISC,LOG_ERROR)("MCB chain broken at %04X:0000!",mcb_segment);
			return;
		}

		mcb.GetFileName(filename);

		// some PSP segment values have special meanings
		switch (mcb.GetPSPSeg()) {
		case MCB_FREE:
			psp_seg_note = "(free)";
			break;
		case MCB_DOS:
			psp_seg_note = "(DOS)";
			break;
		default:
			psp_seg_note = "";
		}

		LOG(LOG_MISC,LOG_ERROR)(" %04X %12u %04X %-7s %s",mcb_segment,mcb.GetSize() << 4,mcb.GetPSPSeg(), psp_seg_note, filename);

		// print a message if dataAddr is within this MCB's memory range
		PhysPt mcbStartAddr = PhysMake(mcb_segment+1,0);
		PhysPt mcbEndAddr = PhysMake(mcb_segment+1+mcb.GetSize(),0);
		if (dataAddr >= mcbStartAddr && dataAddr < mcbEndAddr) {
			LOG(LOG_MISC,LOG_ERROR)(" (data addr %04hX:%04X is %u bytes past this MCB)",dataSeg,dataOfs,dataAddr - mcbStartAddr);
		}

		// if we've just processed the last MCB in the chain, break out of the loop
		if (mcb.GetType()==0x5a) {
			break;
		}

		// else, move to the next MCB in the chain
		mcb_segment+=mcb.GetSize()+1;
		mcb.SetPt(mcb_segment);
	}
}
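// Note (added explanatory comment): the walk in LogMCBChain() relies on the standard
// DOS MCB layout -- each memory control block occupies one paragraph (16 bytes)
// immediately before the memory it describes, the type byte is 'M' (0x4D) while
// further blocks follow and 'Z' (0x5A) marks the last block, and the next MCB
// starts at mcb_segment + size + 1, which is how the loop advances.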
// Display the content of all Memory Control Blocks.
static void LogMCBS(void) {
	LOG(LOG_MISC,LOG_ERROR)("MCB Seg Size (bytes) PSP Seg (notes) Filename");
	LOG(LOG_MISC,LOG_ERROR)("Conventional memory:");
	LogMCBChain(dos.firstMCB);

	LOG(LOG_MISC,LOG_ERROR)("Upper memory:");
	LogMCBChain(dos_infoblock.GetStartOfUMBChain());
}

static void LogGDT(void) {
	char out1[512];
	Descriptor desc;
	Bitu length = cpu.gdt.GetLimit();
	PhysPt address = cpu.gdt.GetBase();
	PhysPt max = address + length;
	Bitu i = 0;
	LOG(LOG_MISC,LOG_ERROR)("GDT Base:%08X Limit:%08X",address,length);
	while (address<max) {
		desc.Load(address);
		sprintf(out1,"%04X: b:%08X type: %02X parbg",(i<<3),desc.GetBase(),desc.desc.seg.type);
		LOG(LOG_MISC,LOG_ERROR)(out1);
		sprintf(out1," l:%08X dpl : %01X %1X%1X%1X%1X%1X",desc.GetLimit(),desc.desc.seg.dpl,desc.desc.seg.p,desc.desc.seg.avl,desc.desc.seg.r,desc.desc.seg.big,desc.desc.seg.g);
		LOG(LOG_MISC,LOG_ERROR)(out1);
		address+=8;
		i++;
	};
};

static void LogLDT(void) {
	char out1[512];
	Descriptor desc;
	Bitu ldtSelector = cpu.gdt.SLDT();
	if (!cpu.gdt.GetDescriptor(ldtSelector,desc)) return;
	Bitu length = desc.GetLimit();
	PhysPt address = desc.GetBase();
	PhysPt max = address + length;
	Bitu i = 0;
	LOG(LOG_MISC,LOG_ERROR)("LDT Base:%08X Limit:%08X",address,length);
	while (address<max) {
		desc.Load(address);
		sprintf(out1,"%04X: b:%08X type: %02X parbg",(i<<3)|4,desc.GetBase(),desc.desc.seg.type);
		LOG(LOG_MISC,LOG_ERROR)(out1);
		sprintf(out1," l:%08X dpl : %01X %1X%1X%1X%1X%1X",desc.GetLimit(),desc.desc.seg.dpl,desc.desc.seg.p,desc.desc.seg.avl,desc.desc.seg.r,desc.desc.seg.big,desc.desc.seg.g);
		LOG(LOG_MISC,LOG_ERROR)(out1);
		address+=8;
		i++;
	};
};

static void LogIDT(void) {
	char out1[512];
	Descriptor desc;
	Bitu address = 0;
	while (address<256*8) {
		if (cpu.idt.GetDescriptor(address,desc)) {
			sprintf(out1,"%04X: sel:%04X off:%02X",address/8,desc.GetSelector(),desc.GetOffset());
			LOG(LOG_MISC,LOG_ERROR)(out1);
		}
		address+=8;
	};
};

void LogPages(char* selname) {
	char out1[512];
	if (paging.enabled) {
		Bitu sel = GetHexValue(selname,selname);
		if ((sel==0x00) && ((*selname==0) || (*selname=='*'))) {
			for (int i=0; i<0xfffff; i++) {
				Bitu table_addr=(paging.base.page<<12)+(i >> 10)*4;
				X86PageEntry table;
				table.load=phys_readd(table_addr);
				if (table.block.p) {
					X86PageEntry entry;
					Bitu entry_addr=(table.block.base<<12)+(i & 0x3ff)*4;
					entry.load=phys_readd(entry_addr);
					if (entry.block.p) {
						sprintf(out1,"page %05Xxxx -> %04Xxxx flags [uw] %x:%x::%x:%x [d=%x|a=%x]",
							i,entry.block.base,entry.block.us,table.block.us,
							entry.block.wr,table.block.wr,entry.block.d,entry.block.a);
						LOG(LOG_MISC,LOG_ERROR)(out1);
					}
				}
			}
		} else {
			Bitu table_addr=(paging.base.page<<12)+(sel >> 10)*4;
			X86PageEntry table;
			table.load=phys_readd(table_addr);
			if (table.block.p) {
				X86PageEntry entry;
				Bitu entry_addr=(table.block.base<<12)+(sel & 0x3ff)*4;
				entry.load=phys_readd(entry_addr);
				sprintf(out1,"page %05Xxxx -> %04Xxxx flags [puw] %x:%x::%x:%x::%x:%x",sel,entry.block.base,entry.block.p,table.block.p,entry.block.us,table.block.us,entry.block.wr,table.block.wr);
				LOG(LOG_MISC,LOG_ERROR)(out1);
			} else {
				sprintf(out1,"pagetable %03X not present, flags [puw] %x::%x::%x",(sel >> 10),table.block.p,table.block.us,table.block.wr);
				LOG(LOG_MISC,LOG_ERROR)(out1);
			}
		}
	}
};

static void LogCPUInfo(void) {
	char out1[512];
	sprintf(out1,"cr0:%08X cr2:%08X cr3:%08X cpl=%x",cpu.cr0,paging.cr2,paging.cr3,cpu.cpl);
	LOG(LOG_MISC,LOG_ERROR)(out1);
	sprintf(out1,"eflags:%08X [vm=%x iopl=%x nt=%x]",reg_flags,GETFLAG(VM)>>17,GETFLAG(IOPL)>>12,GETFLAG(NT)>>14);
	LOG(LOG_MISC,LOG_ERROR)(out1);
	sprintf(out1,"GDT base=%08X limit=%08X",cpu.gdt.GetBase(),cpu.gdt.GetLimit());
	LOG(LOG_MISC,LOG_ERROR)(out1);
	sprintf(out1,"IDT base=%08X limit=%08X",cpu.idt.GetBase(),cpu.idt.GetLimit());
	LOG(LOG_MISC,LOG_ERROR)(out1);

	Bitu sel=CPU_STR();
	Descriptor desc;
	if (cpu.gdt.GetDescriptor(sel,desc)) {
		sprintf(out1,"TR selector=%04X, base=%08X limit=%08X*%X",sel,desc.GetBase(),desc.GetLimit(),desc.desc.seg.g?0x4000:1);
		LOG(LOG_MISC,LOG_ERROR)(out1);
	}
	sel=CPU_SLDT();
	if (cpu.gdt.GetDescriptor(sel,desc)) {
		sprintf(out1,"LDT selector=%04X, base=%08X limit=%08X*%X",sel,desc.GetBase(),desc.GetLimit(),desc.desc.seg.g?0x4000:1);
		LOG(LOG_MISC,LOG_ERROR)(out1);
	}
};

#if C_HEAVY_DEBUG
static void LogInstruction(Bit16u segValue, Bit32u eipValue, ofstream& out) {
	static char empty[23] = { 32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,0 };

	PhysPt start = GetAddress(segValue,eipValue);
	char dline[200];Bitu size;
	size = DasmI386(dline, start, reg_eip, cpu.code.big);
	char* res = empty;
	if (showExtend && (cpuLogType > 0) ) {
		res = AnalyzeInstruction(dline,false);
		if (!res || !(*res)) res = empty;
		Bitu reslen = strlen(res);
		if (reslen<22) for (Bitu i=0; i<22-reslen; i++) res[reslen+i] = ' ';
		res[22] = 0;
	};

	Bitu len = strlen(dline);
	if (len<30) for (Bitu i=0; i<30-len; i++) dline[len + i] = ' ';
	dline[30] = 0;

	// Get register values
	if(cpuLogType == 0) {
		out << setw(4) << SegValue(cs) << ":" << setw(4) << reg_eip << " " << dline;
	} else if (cpuLogType == 1) {
		out << setw(4) << SegValue(cs) << ":" << setw(8) << reg_eip << " " << dline << " " << res;
	} else if (cpuLogType == 2) {
		char ibytes[200]="";
		char tmpc[200];
		for (Bitu i=0; i<size; i++) {
			Bit8u value;
			if (mem_readb_checked(start+i,&value)) sprintf(tmpc,"%s","?? ");
			else sprintf(tmpc,"%02X ",value);
			strcat(ibytes,tmpc);
		}
		len = strlen(ibytes);
		if (len<21) { for (Bitu i=0; i<21-len; i++) ibytes[len + i] =' '; ibytes[21]=0;} //NOTE THE BRACKETS
		out << setw(4) << SegValue(cs) << ":" << setw(8) << reg_eip << " " << dline << " " << res << " " << ibytes;
	}

	out << " EAX:" << setw(8) << reg_eax << " EBX:" << setw(8) << reg_ebx
		<< " ECX:" << setw(8) << reg_ecx << " EDX:" << setw(8) << reg_edx
		<< " ESI:" << setw(8) << reg_esi << " EDI:" << setw(8) << reg_edi
		<< " EBP:" << setw(8) << reg_ebp << " ESP:" << setw(8) << reg_esp
		<< " DS:" << setw(4) << SegValue(ds)<< " ES:" << setw(4) << SegValue(es);

	if(cpuLogType == 0) {
		out << " SS:" << setw(4) << SegValue(ss)
			<< " C" << (get_CF()>0) << " Z" << (get_ZF()>0) << " S" << (get_SF()>0)
			<< " O" << (get_OF()>0) << " I" << GETFLAGBOOL(IF);
	} else {
		out << " FS:" << setw(4) << SegValue(fs) << " GS:" << setw(4) << SegValue(gs)
			<< " SS:" << setw(4) << SegValue(ss)
			<< " CF:" << (get_CF()>0) << " ZF:" << (get_ZF()>0) << " SF:" << (get_SF()>0)
			<< " OF:" << (get_OF()>0) << " AF:" << (get_AF()>0) << " PF:" << (get_PF()>0)
			<< " IF:" << GETFLAGBOOL(IF);
	}
	if(cpuLogType == 2) {
		out << " TF:" << GETFLAGBOOL(TF) << " VM:" << GETFLAGBOOL(VM)
			<<" FLG:" << setw(8) << reg_flags
			<< " CR0:" << setw(8) << cpu.cr0;
	}
	out << endl;
};
#endif
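// Note (added explanatory comment): the DEBUG.COM wrapper below runs a target
// program under the debugger. Run() saves CS:IP and SS:SP, allocates a small
// 0x200-byte stack through DOS_AllocateMemory, launches the target via
// DOS_Shell::Execute() and restores the saved registers afterwards, while
// DEBUG_CheckExecuteBreakpoint() plants a breakpoint at the child's entry point
// so the debugger takes control before its first instruction executes.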
// DEBUG.COM stuff

class DEBUG : public Program {
public:
	DEBUG() { pDebugcom = this; active = false; };
	~DEBUG() { pDebugcom = 0; };

	bool IsActive() { return active; };

	void Run(void) {
		if(cmd->FindExist("/NOMOUSE",false)) {
			real_writed(0,0x33<<2,0);
			return;
		}

		char filename[128];
		char args[256];

		cmd->FindCommand(1,temp_line);
		safe_strncpy(filename,temp_line.c_str(),128);
		// Read commandline
		Bit16u i =2;
		args[0] = 0;
		for (;cmd->FindCommand(i++,temp_line)==true;) {
			strncat(args,temp_line.c_str(),256);
			strncat(args," ",256);
		}
		// Start new shell and execute prog
		active = true;
		// Save cpu state....
		Bit16u oldcs = SegValue(cs);
		Bit32u oldeip = reg_eip;
		Bit16u oldss = SegValue(ss);
		Bit32u oldesp = reg_esp;

		// Workaround : Allocate Stack Space
		Bit16u segment;
		Bit16u size = 0x200 / 0x10;
		if (DOS_AllocateMemory(&segment,&size)) {
			SegSet16(ss,segment);
			reg_sp = 0x200;
			// Start shell
			DOS_Shell shell;
			shell.Execute(filename,args);
			DOS_FreeMemory(segment);
		}
		// set old reg values
		SegSet16(ss,oldss);
		reg_esp = oldesp;
		SegSet16(cs,oldcs);
		reg_eip = oldeip;
	};

private:
	bool active;
};

void DEBUG_CheckExecuteBreakpoint(Bit16u seg, Bit32u off) {
	if (pDebugcom && pDebugcom->IsActive()) {
		CBreakpoint::AddBreakpoint(seg,off,true);
		CBreakpoint::ActivateBreakpoints(SegPhys(cs)+reg_eip,true);
		pDebugcom = 0;
	};
};

Bitu DEBUG_EnableDebugger(void) {
	exitLoop = true;
	DEBUG_Enable(true);
	CPU_Cycles=CPU_CycleLeft=0;
	return 0;
};

static void DEBUG_ProgramStart(Program * * make) {
	*make=new DEBUG;
}

// INIT

void DEBUG_SetupConsole(void) {
#ifdef WIN32
	WIN32_Console();
#else
	tcgetattr(0,&consolesettings);
	printf("\e[8;50;80t"); //resize terminal
	fflush(NULL);
#endif
	memset((void *)&dbg,0,sizeof(dbg));
	debugging=false;
//	dbg.active_win=3;
	/* Start the Debug Gui */
	DBGUI_StartUp();
}

void DEBUG_ShutDown(Section * /*sec*/) {
	CBreakpoint::DeleteAll();
	CDebugVar::DeleteAll();
	curs_set(old_cursor_state);
	endwin();
#ifndef WIN32
	tcsetattr(0, TCSANOW,&consolesettings);
//	printf("\e[0m\e[2J"); //Seems to destroy scrolling
	printf("\ec");
	fflush(NULL);
#endif
}

Bitu debugCallback;

void DEBUG_Init(Section* sec) {

//	MSG_Add("DEBUG_CONFIGFILE_HELP","Debugger related options.\n");
	DEBUG_DrawScreen();
	/* Add some keyhandlers */
	MAPPER_AddHandler(DEBUG_Enable,MK_pause,MMOD2,"debugger","Debugger");
	/* Reset code overview and input line */
	memset((void*)&codeViewData,0,sizeof(codeViewData));
	/* setup debug.com */
	PROGRAMS_MakeFile("DEBUG.COM",DEBUG_ProgramStart);
	/* Setup callback */
	debugCallback=CALLBACK_Allocate();
	CALLBACK_Setup(debugCallback,DEBUG_EnableDebugger,CB_RETF,"debugger");
	/* shutdown function */
	sec->AddDestroyFunction(&DEBUG_ShutDown);
}

// DEBUGGING VAR STUFF

void CDebugVar::InsertVariable(char* name, PhysPt adr) {
	varList.push_back(new CDebugVar(name,adr));
};

void CDebugVar::DeleteAll(void) {
	std::list<CDebugVar*>::iterator i;
	CDebugVar* bp;
	for(i=varList.begin(); i != varList.end(); i++) {
		bp = static_cast<CDebugVar*>(*i);
		delete bp;
	};
	(varList.clear)();
};

CDebugVar* CDebugVar::FindVar(PhysPt pt) {
	std::list<CDebugVar*>::iterator i;
	CDebugVar* bp;
	for(i=varList.begin(); i != varList.end(); i++) {
		bp = static_cast<CDebugVar*>(*i);
		if (bp->GetAdr()==pt) return bp;
	};
	return 0;
};

bool CDebugVar::SaveVars(char* name) {
	if (varList.size()>65535) return false;

	FILE* f = fopen(name,"wb+");
	if (!f) return false;

	// write number of vars
	Bit16u num = (Bit16u)varList.size();
	fwrite(&num,1,sizeof(num),f);

	std::list<CDebugVar*>::iterator i;
	CDebugVar* bp;
	for(i=varList.begin(); i != varList.end(); i++) {
		bp = static_cast<CDebugVar*>(*i);
		// name
		fwrite(bp->GetName(),1,16,f);
		// adr
		PhysPt adr = bp->GetAdr();
		fwrite(&adr,1,sizeof(adr),f);
	};
	fclose(f);
	return true;
};

bool CDebugVar::LoadVars(char* name) {
	FILE* f = fopen(name,"rb");
	if (!f) return false;

	// read number of vars
	Bit16u num;
	fread(&num,1,sizeof(num),f);

	for (Bit16u i=0; i<num; i++) {
		char name[16];
		// name
		fread(name,1,16,f);
		// adr
		PhysPt adr;
		fread(&adr,1,sizeof(adr),f);
		// insert
		InsertVariable(name,adr);
	};
	fclose(f);
	return true;
};
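// Note (added explanatory comment): the dump helpers below back the debugger's
// memory commands. SaveMemory() writes a hex listing to MEMDUMP.TXT and
// SaveMemoryBin() a raw image to MEMDUMP.BIN in the current working directory;
// unreadable bytes appear as "??" in the text dump and as 0 in the binary dump.
// OutputVecTable() dumps all 256 real-mode interrupt vectors as segment:offset pairs.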
static void SaveMemory(Bitu seg, Bitu ofs1, Bit32u num) {
	FILE* f = fopen("MEMDUMP.TXT","wt");
	if (!f) {
		DEBUG_ShowMsg("DEBUG: Memory dump failed.\n");
		return;
	}

	char buffer[128];
	char temp[16];

	while (num>16) {
		sprintf(buffer,"%04X:%04X ",seg,ofs1);
		for (Bit16u x=0; x<16; x++) {
			Bit8u value;
			if (mem_readb_checked(GetAddress(seg,ofs1+x),&value)) sprintf(temp,"%s","?? ");
			else sprintf(temp,"%02X ",value);
			strcat(buffer,temp);
		}
		ofs1+=16;
		num-=16;

		fprintf(f,"%s\n",buffer);
	}
	if (num>0) {
		sprintf(buffer,"%04X:%04X ",seg,ofs1);
		for (Bit16u x=0; x<num; x++) {
			Bit8u value;
			if (mem_readb_checked(GetAddress(seg,ofs1+x),&value)) sprintf(temp,"%s","?? ");
			else sprintf(temp,"%02X ",value);
			strcat(buffer,temp);
		}
		fprintf(f,"%s\n",buffer);
	}
	fclose(f);
	DEBUG_ShowMsg("DEBUG: Memory dump success.\n");
}

static void SaveMemoryBin(Bitu seg, Bitu ofs1, Bit32u num) {
	FILE* f = fopen("MEMDUMP.BIN","wb");
	if (!f) {
		DEBUG_ShowMsg("DEBUG: Memory binary dump failed.\n");
		return;
	}

	for (Bitu x = 0; x < num;x++) {
		Bit8u val;
		if (mem_readb_checked(GetAddress(seg,ofs1+x),&val)) val=0;
		fwrite(&val,1,1,f);
	}

	fclose(f);
	DEBUG_ShowMsg("DEBUG: Memory dump binary success.\n");
}

static void OutputVecTable(char* filename) {
	FILE* f = fopen(filename, "wt");
	if (!f) {
		DEBUG_ShowMsg("DEBUG: Output of interrupt vector table failed.\n");
		return;
	}

	for (int i=0; i<256; i++)
		fprintf(f,"INT %02X: %04X:%04X\n", i, mem_readw(i*4+2), mem_readw(i*4));

	fclose(f);
	DEBUG_ShowMsg("DEBUG: Interrupt vector table written to %s.\n", filename);
}

#define DEBUG_VAR_BUF_LEN 16
static void DrawVariables(void) {
	if (CDebugVar::varList.empty()) return;

	std::list<CDebugVar*>::iterator i;
	CDebugVar *dv;
	char buffer[DEBUG_VAR_BUF_LEN];

	int idx = 0;
	for(i=CDebugVar::varList.begin(); i != CDebugVar::varList.end(); i++, idx++) {

		if (idx == 4*3) {
			/* too many variables */
			break;
		}

		dv = static_cast<CDebugVar*>(*i);

		Bit16u value;
		if (mem_readw_checked(dv->GetAdr(),&value))
			snprintf(buffer,DEBUG_VAR_BUF_LEN, "%s", "??????");
		else
			snprintf(buffer,DEBUG_VAR_BUF_LEN, "0x%04x", value);

		int y = idx / 3;
		int x = (idx % 3) * 26;
		mvwprintw(dbg.win_var, y, x, dv->GetName());
		mvwprintw(dbg.win_var, y, (x + DEBUG_VAR_BUF_LEN + 1) , buffer);
	}

	wrefresh(dbg.win_var);
};
#undef DEBUG_VAR_BUF_LEN

// HEAVY DEBUGGING STUFF

#if C_HEAVY_DEBUG

const Bit32u LOGCPUMAX = 20000;

static Bit32u logCount = 0;

struct TLogInst {
	Bit16u s_cs;
	Bit32u eip;
	Bit32u eax;
	Bit32u ebx;
	Bit32u ecx;
	Bit32u edx;
	Bit32u esi;
	Bit32u edi;
	Bit32u ebp;
	Bit32u esp;
	Bit16u s_ds;
	Bit16u s_es;
	Bit16u s_fs;
	Bit16u s_gs;
	Bit16u s_ss;
	bool c;
	bool z;
	bool s;
	bool o;
	bool a;
	bool p;
	bool i;
	char dline[31];
	char res[23];
};

TLogInst logInst[LOGCPUMAX];

void DEBUG_HeavyLogInstruction(void) {
	static char empty[23] = { 32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,0 };

	PhysPt start = GetAddress(SegValue(cs),reg_eip);

	char dline[200];
	DasmI386(dline, start, reg_eip, cpu.code.big);
	char* res = empty;
	if (showExtend) {
		res = AnalyzeInstruction(dline,false);
		if (!res || !(*res)) res = empty;
		Bitu reslen = strlen(res);
		if (reslen<22) for (Bitu i=0; i<22-reslen; i++) res[reslen+i] = ' ';
		res[22] = 0;
	};

	Bitu len = strlen(dline);
	if (len < 30) for (Bitu i=0; i < 30-len; i++) dline[len+i] = ' ';
	dline[30] = 0;

	TLogInst & inst = logInst[logCount];
	strcpy(inst.dline,dline);
	inst.s_cs = SegValue(cs);
	inst.eip = reg_eip;
	strcpy(inst.res,res);
	inst.eax = reg_eax;
	inst.ebx = reg_ebx;
	inst.ecx = reg_ecx;
	inst.edx = reg_edx;
	inst.esi = reg_esi;
	inst.edi = reg_edi;
	inst.ebp = reg_ebp;
	inst.esp = reg_esp;
	inst.s_ds = SegValue(ds);
	inst.s_es = SegValue(es);
	inst.s_fs = SegValue(fs);
	inst.s_gs = SegValue(gs);
	inst.s_ss = SegValue(ss);
	inst.c = get_CF()>0;
	inst.z = get_ZF()>0;
	inst.s = get_SF()>0;
	inst.o = get_OF()>0;
	inst.a = get_AF()>0;
	inst.p = get_PF()>0;
	inst.i = GETFLAGBOOL(IF);

	if (++logCount >= LOGCPUMAX) logCount = 0;
};

void DEBUG_HeavyWriteLogInstruction(void) {
	if (!logHeavy) return;
	logHeavy = false;

	DEBUG_ShowMsg("DEBUG: Creating cpu log LOGCPU_INT_CD.TXT\n");

	ofstream out("LOGCPU_INT_CD.TXT");
	if (!out.is_open()) {
		DEBUG_ShowMsg("DEBUG: Failed.\n");
		return;
	}
	out << hex << noshowbase << setfill('0') << uppercase;
	Bit32u startLog = logCount;
	do {
		// Write Instructions
		TLogInst & inst = logInst[startLog];
		out << setw(4) << inst.s_cs << ":" << setw(8) << inst.eip << " "
			<< inst.dline << " " << inst.res << " EAX:" << setw(8)<< inst.eax
			<< " EBX:" << setw(8) << inst.ebx << " ECX:" << setw(8) << inst.ecx
			<< " EDX:" << setw(8) << inst.edx << " ESI:" << setw(8) << inst.esi
			<< " EDI:" << setw(8) << inst.edi << " EBP:" << setw(8) << inst.ebp
			<< " ESP:" << setw(8) << inst.esp << " DS:" << setw(4) << inst.s_ds
			<< " ES:" << setw(4) << inst.s_es<< " FS:" << setw(4) << inst.s_fs
			<< " GS:" << setw(4) << inst.s_gs<< " SS:" << setw(4) << inst.s_ss
			<< " CF:" << inst.c << " ZF:" << inst.z << " SF:" << inst.s
			<< " OF:" << inst.o << " AF:" << inst.a << " PF:" << inst.p
			<< " IF:" << inst.i << endl;

		/* fprintf(f,"%04X:%08X %s %s EAX:%08X EBX:%08X ECX:%08X EDX:%08X ESI:%08X EDI:%08X EBP:%08X ESP:%08X DS:%04X ES:%04X FS:%04X GS:%04X SS:%04X CF:%01X ZF:%01X SF:%01X OF:%01X AF:%01X PF:%01X IF:%01X\n",
			logInst[startLog].s_cs,logInst[startLog].eip,logInst[startLog].dline,logInst[startLog].res,logInst[startLog].eax,logInst[startLog].ebx,logInst[startLog].ecx,logInst[startLog].edx,logInst[startLog].esi,logInst[startLog].edi,logInst[startLog].ebp,logInst[startLog].esp,
			logInst[startLog].s_ds,logInst[startLog].s_es,logInst[startLog].s_fs,logInst[startLog].s_gs,logInst[startLog].s_ss,
			logInst[startLog].c,logInst[startLog].z,logInst[startLog].s,logInst[startLog].o,logInst[startLog].a,logInst[startLog].p,logInst[startLog].i);*/

		if (++startLog >= LOGCPUMAX) startLog = 0;
	} while (startLog != logCount);

	out.close();
	DEBUG_ShowMsg("DEBUG: Done.\n");
};

bool DEBUG_HeavyIsBreakpoint(void) {
	static Bitu zero_count = 0;
	if (cpuLog) {
		if (cpuLogCounter>0) {
			LogInstruction(SegValue(cs),reg_eip,cpuLogFile);
			cpuLogCounter--;
		}
		if (cpuLogCounter<=0) {
			cpuLogFile.close();
			DEBUG_ShowMsg("DEBUG: cpu log LOGCPU.TXT created\n");
			cpuLog = false;
			DEBUG_EnableDebugger();
			return true;
		}
	}
	// LogInstruction
	if (logHeavy) DEBUG_HeavyLogInstruction();
	if (zeroProtect) {
		Bit32u value=0;
		if (!mem_readd_checked(SegPhys(cs)+reg_eip,&value)) {
			if (value == 0) zero_count++;
			else zero_count = 0;
		}
		if (GCC_UNLIKELY(zero_count == 10)) E_Exit("running zeroed code");
	}

	if (skipFirstInstruction) {
		skipFirstInstruction = false;
		return false;
	}
	if (CBreakpoint::CheckBreakpoint(SegValue(cs),reg_eip)) {
		return true;
	}
	return false;
}

#endif // HEAVY DEBUG

#endif // DEBUG<|fim▁end|>
static Bit32u dataOfs;
static bool showExtend = true;