Dataset schema (prompt/completion pairs):
  prompt:     large_string, length 70 – 991k
  completion: large_string, length 0 – 1.02k
==== PolarGlue.cpp ====

/* Copyright_License {

  XCSoar Glide Computer - http://www.xcsoar.org/
  Copyright (C) 2000-2016 The XCSoar Project
  A detailed list of copyright holders can be found in the file "AUTHORS".

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License
  as published by the Free Software Foundation; either version 2
  of the License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
}
*/

#include "Polar/PolarGlue.hpp"
#include "Polar/PolarFileGlue.hpp"
#include "Polar/Polar.hpp"
#include "Polar/PolarStore.hpp"
#include "Parser.hpp"
#include "Profile/Profile.hpp"
#include "IO/ConfiguredFile.hpp"
#include "IO/LineReader.hpp"
#include "Dialogs/Message.hpp"
#include "Language/Language.hpp"
#include "Util/StringCompare.hxx"

#include <memory>

namespace PolarGlue {
  bool LoadFromOldProfile(PolarInfo &polar);
}

PolarInfo
PolarGlue::GetDefault()
{
  // Return LS8 polar
  return PolarStore::GetItem(56).ToPolarInfo();
}

static bool
ReadPolarFileFromProfile(PolarInfo &polar)
{
  std::unique_ptr<NLineReader> reader(OpenConfiguredTextFileA(ProfileKeys::PolarFile));
  return reader && PolarGlue::LoadFromFile(polar, *reader);
}

bool
PolarGlue::LoadFromOldProfile(PolarInfo &polar)
{
  unsigned polar_id;
  if (!Profile::Get(ProfileKeys::PolarID, polar_id))
    return false;

  if (polar_id == 6)
    return ReadPolarFileFromProfile(polar);

  if (polar_id == 0)
    polar_id = 45;
  else if (polar_id == 1)
    polar_id = 16;
  else if (polar_id == 2)
    polar_id = 56;
  else if (polar_id == 3)
    polar_id = 19;
  else if (polar_id == 4)
    polar_id = 55;
  else if (polar_id == 5)
    polar_id = 118;
  else {
    polar_id -= 7;
    if (polar_id >= PolarStore::Count())
      return false;
  }

  polar = PolarStore::GetItem(polar_id).ToPolarInfo();
  return true;
}

bool
PolarGlue::LoadFromProfile(PolarInfo &polar)
{
  const char *polar_string = Profile::Get(ProfileKeys::Polar);
  if (polar_string != nullptr && !StringIsEmpty(polar_string) &&
      ParsePolar(polar, polar_string)) {
    return true;
  }

  return LoadFromOldProfile(polar);
}

PolarInfo
PolarGlue::LoadFromProfile()
{
  PolarInfo polar;
  if (!LoadFromProfile(polar) || !polar.IsValid()) {
    if (Profile::Exists(ProfileKeys::Polar) || Profile::Exists(ProfileKeys::PolarID))
      ShowMessageBox(_("Polar has invalid coefficients.\nUsing LS8 polar instead!"),
                     _("Warning"), MB_OK);
    polar = GetDefault();
  }

  return polar;
}
==== ismaster.py ====

# Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Parse a response to the 'ismaster' command."""

import itertools

from bson.py3compat import imap
from pymongo import common
from pymongo.server_type import SERVER_TYPE


def _get_server_type(doc):
    """Determine the server type from an ismaster response."""
    if not doc.get('ok'):
        return SERVER_TYPE.Unknown

    if doc.get('isreplicaset'):
        return SERVER_TYPE.RSGhost
    elif doc.get('setName'):
        if doc.get('hidden'):
            return SERVER_TYPE.RSOther
        elif doc.get('ismaster'):
            return SERVER_TYPE.RSPrimary
        elif doc.get('secondary'):
            return SERVER_TYPE.RSSecondary
        elif doc.get('arbiterOnly'):
            return SERVER_TYPE.RSArbiter
        else:
            return SERVER_TYPE.RSOther
    elif doc.get('msg') == 'isdbgrid':
        return SERVER_TYPE.Mongos
    else:
        return SERVER_TYPE.Standalone


class IsMaster(object):
    __slots__ = ('_doc', '_server_type', '_is_writable', '_is_readable')

    def __init__(self, doc):
        """Parse an ismaster response from the server."""
        self._server_type = _get_server_type(doc)
        self._doc = doc
        self._is_writable = self._server_type in (
            SERVER_TYPE.RSPrimary,
            SERVER_TYPE.Standalone,
            SERVER_TYPE.Mongos)

        self._is_readable = (
            self.server_type == SERVER_TYPE.RSSecondary
            or self._is_writable)

    @property
    def server_type(self):
        return self._server_type

    @property
    def all_hosts(self):
        """List of hosts, passives, and arbiters known to this server."""
        return set(imap(common.clean_node, itertools.chain(
            self._doc.get('hosts', []),
            self._doc.get('passives', []),
            self._doc.get('arbiters', []))))

    @property
    def tags(self):
        """Replica set member tags or empty dict."""
        return self._doc.get('tags', {})

    @property
    def primary(self):
        """This server's opinion about who the primary is, or None."""
        if self._doc.get('primary'):
            return common.partition_node(self._doc['primary'])
        else:
            return None

    @property
    def replica_set_name(self):
        """Replica set name or None."""
        return self._doc.get('setName')

    @property
    def max_bson_size(self):
        return self._doc.get('maxBsonObjectSize', common.MAX_BSON_SIZE)

    @property
    def max_message_size(self):
        return self._doc.get('maxMessageSizeBytes', 2 * self.max_bson_size)

    @property
    def max_write_batch_size(self):
        return self._doc.get('maxWriteBatchSize', common.MAX_WRITE_BATCH_SIZE)

    @property
    def min_wire_version(self):
        return self._doc.get('minWireVersion', common.MIN_WIRE_VERSION)

    @property
    def max_wire_version(self):
        return self._doc.get('maxWireVersion', common.MAX_WIRE_VERSION)

    @property
    def election_id(self):
        return self._doc.get('electionId')

    @property
    def is_writable(self):
        return self._is_writable

    @property
    def is_readable(self):
        return self._is_readable
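A minimal usage sketch (not from the original file); the response document below is hand-built and its field values are illustrative:

# Hypothetical ismaster response for a replica-set primary.
doc = {'ok': 1, 'ismaster': True, 'setName': 'rs0',
       'hosts': ['a.example.com:27017', 'b.example.com:27017']}
parsed = IsMaster(doc)
assert parsed.server_type == SERVER_TYPE.RSPrimary
assert parsed.is_writable and parsed.is_readable
assert parsed.replica_set_name == 'rs0'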
==== executor.py ====

import asyncio
from functools import partial


class AsyncWrapper:
    def __init__(self, target_instance, executor=None):
        self._target_inst = target_instance
        self._loop = asyncio.get_event_loop()
        self._executor = executor

    def __getattribute__(self, name):
        try:
            # Attributes defined on the wrapper itself resolve normally.
            return super().__getattribute__(name)
        except AttributeError:
            # Anything else is looked up on the wrapped instance and
            # returned as an awaitable bound to _async_wrapper.
            method = self._target_inst.__getattribute__(name)
            return partial(self._async_wrapper, method)

    async def _async_wrapper(self, method_name, *args, **kwargs):
        coroutine_wrapped = partial(method_name, *args, **kwargs)
        # Await the executor future so callers get the method's result
        # directly (the original returned the future itself, which would
        # have forced callers to await twice).
        return await self._loop.run_in_executor(self._executor, coroutine_wrapped)
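A small usage sketch (not part of the original module); it assumes the `return await` fix noted above, and the class and method names are illustrative:

import time

class Blocking:
    def work(self, n):
        time.sleep(n)  # a blocking call, offloaded to the executor
        return n * 2

async def main():
    wrapped = AsyncWrapper(Blocking())
    print(await wrapped.work(1))  # prints 2 without blocking the event loop

asyncio.get_event_loop().run_until_complete(main())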
==== CWLWorkflow.java ====

package org.rabix.bindings.cwl.bean;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.rabix.bindings.cwl.json.CWLStepsDeserializer;
import org.rabix.bindings.model.ValidationReport;
import org.rabix.common.json.BeanPropertyView;

import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonView;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;

@JsonDeserialize(as = CWLWorkflow.class)
public class CWLWorkflow extends CWLJobApp {

  @JsonProperty("steps")
  @JsonDeserialize(using = CWLStepsDeserializer.class)
  private List<CWLStep> steps;

  @JsonProperty("dataLinks")
  @JsonView(BeanPropertyView.Full.class)
  private List<CWLDataLink> dataLinks;

  public CWLWorkflow() {
    this.steps = new ArrayList<>();
    this.dataLinks = new ArrayList<>();
  }

  public CWLWorkflow(List<CWLStep> steps, List<CWLDataLink> dataLinks) {
    this.steps = steps;
    this.dataLinks = dataLinks;
  }

  @JsonIgnore
  public void addDataLink(CWLDataLink dataLink) {
    this.dataLinks.add(dataLink);
  }

  @JsonIgnore
  public void addDataLinks(List<CWLDataLink> dataLinks) {
    this.dataLinks.addAll(dataLinks);
  }

  public List<CWLStep> getSteps() {
    return steps;
  }

  public List<CWLDataLink> getDataLinks() {
    return dataLinks;
  }

  @Override
  public String toString() {
    return "Workflow [steps=" + steps + ", dataLinks=" + dataLinks + ", id=" + getId() + ", context=" + getContext()
        + ", description=" + getDescription() + ", inputs=" + getInputs() + ", outputs=" + getOutputs()
        + ", requirements=" + requirements + "]";
  }

  @Override
  @JsonIgnore
  public CWLJobAppType getType() {
    return CWLJobAppType.WORKFLOW;
  }

  private Set<String> checkStepDuplicates() {
    Set<String> duplicates = new HashSet<>();
    Set<String> ids = new HashSet<>();
    for (CWLStep step : steps) {
      if (!ids.add(step.getId())) {
        duplicates.add(step.getId());
      }
    }
    return duplicates;
  }

  // private Set<String> unconnectedOutputs() {
  //
  // }
  //
  // private Set<String> unconnectedSteps() {
  //
  // }
  //
  // private Set<String> unconnectedInputs() {
  //
  // }

  @Override
  public ValidationReport validate() {
    List<ValidationReport.Item> messages = new ArrayList<>();
    messages.addAll(ValidationReport.messagesToItems(validatePortUniqueness(), ValidationReport.Severity.ERROR));

    for (String duplicate : checkStepDuplicates()) {
      messages.add(ValidationReport.error("Duplicate step id: " + duplicate));
    }
    if (steps == null || steps.isEmpty()) {
      messages.add(ValidationReport.error("Workflow has no steps"));
    }
    for (CWLStep step : steps) {
      for (ValidationReport.Item item : step.getApp().validate().getItems()) {
        messages.add(
            ValidationReport.item(
                item.getSeverity() + " from app in step '" + step.getId() + "': " + item.getMessage(),
                item.getSeverity())
        );
      }
    }
    return new ValidationReport(messages);
  }
}
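A hypothetical caller of the `validate()` method above; in practice a workflow is deserialized from a CWL document via Jackson rather than constructed empty, and the class name here is made up:

import java.util.ArrayList;
import org.rabix.bindings.cwl.bean.CWLWorkflow;
import org.rabix.bindings.model.ValidationReport;

public class ValidateExample {
    public static void main(String[] args) {
        // An empty workflow trips the "Workflow has no steps" error above.
        CWLWorkflow workflow = new CWLWorkflow(new ArrayList<>(), new ArrayList<>());
        ValidationReport report = workflow.validate();
        for (ValidationReport.Item item : report.getItems()) {
            System.out.println(item.getSeverity() + ": " + item.getMessage());
        }
    }
}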
==== quagga.py ====

# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from base import *


class Quagga(Container):

    CONTAINER_NAME = None
    GUEST_DIR = '/root/config'

    def __init__(self, host_dir, conf, image='bgperf/quagga'):
        super(Quagga, self).__init__(self.CONTAINER_NAME, image, host_dir,
                                     self.GUEST_DIR, conf)

    @classmethod
    def build_image(cls, force=False, tag='bgperf/quagga', checkout='HEAD', nocache=False):
        cls.dockerfile = '''
FROM ubuntu:latest
WORKDIR /root
RUN useradd -M quagga
RUN mkdir /var/log/quagga && chown quagga:quagga /var/log/quagga
RUN mkdir /var/run/quagga && chown quagga:quagga /var/run/quagga
RUN apt-get update && apt-get install -qy git autoconf libtool gawk make telnet libreadline6-dev
RUN git clone git://git.sv.gnu.org/quagga.git quagga
RUN cd quagga && git checkout {0} && ./bootstrap.sh && \
./configure --disable-doc --localstatedir=/var/run/quagga && make && make install
RUN ldconfig
'''.format(checkout)
        super(Quagga, cls).build_image(force, tag, nocache)


class QuaggaTarget(Quagga, Target):

    CONTAINER_NAME = 'bgperf_quagga_target'
    CONFIG_FILE_NAME = 'bgpd.conf'

    def write_config(self, scenario_global_conf):
        config = """hostname bgpd
password zebra
router bgp {0}
bgp router-id {1}
""".format(self.conf['as'], self.conf['router-id'])

        def gen_neighbor_config(n):
            local_addr = n['local-address']
            c = """neighbor {0} remote-as {1}
neighbor {0} advertisement-interval 1
neighbor {0} route-server-client
neighbor {0} timers 30 90
""".format(local_addr, n['as'])
            if 'filter' in n:
                for p in (n['filter']['in'] if 'in' in n['filter'] else []):
                    c += 'neighbor {0} route-map {1} export\n'.format(local_addr, p)
            return c

        with open('{0}/{1}'.format(self.host_dir, self.CONFIG_FILE_NAME), 'w') as f:
            f.write(config)
            for n in list(flatten(t.get('neighbors', {}).values()
                                  for t in scenario_global_conf['testers'])) + [scenario_global_conf['monitor']]:
                f.write(gen_neighbor_config(n))

            if 'policy' in scenario_global_conf:
                seq = 10
                for k, v in scenario_global_conf['policy'].iteritems():
                    match_info = []
                    for i, match in enumerate(v['match']):
                        n = '{0}_match_{1}'.format(k, i)
                        if match['type'] == 'prefix':
                            f.write(''.join('ip prefix-list {0} deny {1}\n'.format(n, p)
                                            for p in match['value']))
                            f.write('ip prefix-list {0} permit any\n'.format(n))
                        elif match['type'] == 'as-path':
                            f.write(''.join('ip as-path access-list {0} deny _{1}_\n'.format(n, p)
                                            for p in match['value']))
                            f.write('ip as-path access-list {0} permit .*\n'.format(n))
                        elif match['type'] == 'community':
                            f.write(''.join('ip community-list standard {0} permit {1}\n'.format(n, p)
                                            for p in match['value']))
                            f.write('ip community-list standard {0} permit\n'.format(n))
                        elif match['type'] == 'ext-community':
                            f.write(''.join('ip extcommunity-list standard {0} permit {1} {2}\n'.format(n, *p.split(':', 1))
                                            for p in match['value']))
                            f.write('ip extcommunity-list standard {0} permit\n'.format(n))
                        match_info.append((match['type'], n))

                    f.write('route-map {0} permit {1}\n'.format(k, seq))
                    for info in match_info:
                        if info[0] == 'prefix':
                            f.write('match ip address prefix-list {0}\n'.format(info[1]))
                        elif info[0] == 'as-path':
                            f.write('match as-path {0}\n'.format(info[1]))
                        elif info[0] == 'community':
                            f.write('match community {0}\n'.format(info[1]))
                        elif info[0] == 'ext-community':
                            f.write('match extcommunity {0}\n'.format(info[1]))
                    seq += 10

    def get_startup_cmd(self):
        return '\n'.join(
            ['#!/bin/bash',
             'ulimit -n 65536',
             'bgpd -u root -f {guest_dir}/{config_file_name}']
        ).format(
            guest_dir=self.guest_dir,
            config_file_name=self.CONFIG_FILE_NAME)
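For reference, a sketch of the scenario structure `write_config` walks; the key names come from the accesses in the code above, while the concrete addresses, AS numbers, and policy values are made up:

# Hypothetical scenario_global_conf accepted by QuaggaTarget.write_config.
scenario_global_conf = {
    'testers': [
        {'neighbors': {
            'n1': {'local-address': '10.10.0.1', 'as': 1001,
                   'filter': {'in': ['policy0']}},
        }},
    ],
    'monitor': {'local-address': '10.10.0.254', 'as': 1254},
    'policy': {
        'policy0': {
            'match': [
                {'type': 'prefix', 'value': ['192.168.0.0/24']},
            ],
        },
    },
}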
==== ela-sep.py ====

import os
import sys
from pymongo import MongoClient
from werkzeug.utils import secure_filename

client = MongoClient('mongodb://localhost:27017/')
db = client.ir

#li=[]
#color=open("AllColors.txt","r")
doc1 = []
doc2 = []
edgeConWT = []
edgeElaWT = []
edgeStart = []
edgeEnd = []

path = "JVcode/Scripts/ForClassification/"
for file in os.listdir(path):
    edgeElaWT = []
    edgeConWT = []
    edgeStart = []
    edgeEnd = []
    print(file)
    if file.endswith(".tab.scores"):
        fdTemp = open(path + file, "r")
        #fdOut=open("output/new/elab-"+file,"w+")
        for i1 in fdTemp:
            line = i1.split(" ")
            #print line
            edgeStart.append(line[0])
            edgeEnd.append(line[1])
            edgeConWT.append(float(line[2]))
            if float(line[3]) > 0:
                edgeElaWT.append(float(line[3]))
            else:
                edgeElaWT.append(0.0)

        # Bubble-sort edges by elaboration weight (descending), keeping the
        # start/end lists in step with the weights.
        for i in range(0, len(edgeElaWT)):
            for j in range(0, len(edgeElaWT)):
                if j < (len(edgeConWT) - 1):
                    if edgeElaWT[j] < edgeElaWT[j + 1]:
                        temp = edgeElaWT[j]
                        edgeElaWT[j] = edgeElaWT[j + 1]
                        edgeElaWT[j + 1] = temp
                        temp2 = edgeStart[j]
                        edgeStart[j] = edgeStart[j + 1]
                        edgeStart[j + 1] = temp2
                        temp3 = edgeEnd[j]
                        edgeEnd[j] = edgeEnd[j + 1]
                        edgeEnd[j + 1] = temp3
        #print (edgeEnd,edgeElaWT)

        # Collect the top five targets and look up their papers.
        t2 = []
        for k in range(0, 5):
            results = db.papers.find_one({'filename': edgeEnd[k][:-3] + 'pdf'})
            print results
            h = {}
            h['name'] = results['_id']
            h['domain'] = results['domain']
            t2.append(h)
        print("To update : ", t2)
        print("for => ", file)
        str1 = "db.rPaper.update({'filename':" + file[:-10] + "'pdf'}, {'$set': {'elaboration':}})"
        print(str1)
        results = db.rPaper.update({'filename': file[:-10] + 'pdf'},
                                   {'$set': {'elaboration': t2}})
        print(results)
print "DONE"
==== options.js ====

/**
 * options.js
 *
 * A test helper to detect which html-snapshots options to use
 *
 * phantomjs
 *   If a global phantomjs is defined, decorates html-snapshots options to specify that global
 *   In some test environments (travis), local phantomjs will not install if a global is found.
 */
const spawn = require("child_process").spawn;

module.exports = {
  // for now, callback is passed true if global phantomjs should be used
  detector: callback => {
    // try to run phantom globally
    const cp = spawn("phantomjs", ["--version"]);

    // if it fails, use local per the defaults
    cp.on("error", () => {
      callback(false);
    });

    // if it succeeds, use the global
    cp.on("exit", code => {
      if (code === 0) {
        callback(true);
      }
    });
  }
};
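A hypothetical consumer of the helper (the relative path is assumed):

// Decide which phantomjs to use before configuring the test run.
const options = require("./options");

options.detector(useGlobal => {
  console.log(useGlobal ? "using global phantomjs" : "using local phantomjs");
});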
==== constants.py ====

# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

'''constants.py: constants for integration test for pyheron'''

INTEGRATION_TEST_MOCK_MESSAGE_ID = "__integration_test_mock_message_id"
INTEGRATION_TEST_TERMINAL = "__integration_test_mock_terminal"
INTEGRATION_TEST_CONTROL_STREAM_ID = "__integration_test_control_stream_id"

# internal config key
MAX_EXECUTIONS = 10
HTTP_POST_URL_KEY = "http.post.url"

# user defined config key
USER_SPOUT_CLASSPATH = "user.spout.classpath"
USER_BOLT_CLASSPATH = "user.bolt.classpath"

# user defined max executions
USER_MAX_EXECUTIONS = "user.max.exec"
==== pl-pl.py ====

# coding: utf8
{
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"Uaktualnij" jest dodatkowym wyra\xc5\xbceniem postaci "pole1=\'nowawarto\xc5\x9b\xc4\x87\'". Nie mo\xc5\xbcesz uaktualni\xc4\x87 lub usun\xc4\x85\xc4\x87 wynik\xc3\xb3w z JOIN:',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s rows deleted': 'Wierszy usuni\xc4\x99tych: %s',
'%s rows updated': 'Wierszy uaktualnionych: %s',
'Available databases and tables': 'Dost\xc4\x99pne bazy danych i tabele',
'Cannot be empty': 'Nie mo\xc5\xbce by\xc4\x87 puste',
'Change Password': 'Change Password',
'Check to delete': 'Zaznacz aby usun\xc4\x85\xc4\x87',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Current request': 'Aktualne \xc5\xbc\xc4\x85danie',
'Current response': 'Aktualna odpowied\xc5\xba',
'Current session': 'Aktualna sesja',
'DB Model': 'DB Model',
'Database': 'Database',
'Delete:': 'Usu\xc5\x84:',
'Edit': 'Edit',
'Edit Profile': 'Edit Profile',
'Edit This App': 'Edit This App',
'Edit current record': 'Edytuj aktualny rekord',
'Hello World': 'Witaj \xc5\x9awiecie',
'Import/Export': 'Importuj/eksportuj',
'Index': 'Index',
'Internal State': 'Stan wewn\xc4\x99trzny',
'Invalid Query': 'B\xc5\x82\xc4\x99dne zapytanie',
'Layout': 'Layout',
'Login': 'Zaloguj',
'Logout': 'Logout',
'Lost Password': 'Przypomnij has\xc5\x82o',
'Main Menu': 'Main Menu',
'Menu Model': 'Menu Model',
'New Record': 'Nowy rekord',
'No databases in this application': 'Brak baz danych w tej aplikacji',
'Powered by': 'Powered by',
'Query:': 'Zapytanie:',
'Register': 'Zarejestruj',
'Rows in table': 'Wiersze w tabeli',
'Rows selected': 'Wybrane wiersze',
'Stylesheet': 'Stylesheet',
'Sure you want to delete this object?': 'Czy na pewno chcesz usun\xc4\x85\xc4\x87 ten obiekt?',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Zapytanie" jest warunkiem postaci "db.tabela1.pole1==\'warto\xc5\x9b\xc4\x87\'". Takie co\xc5\x9b jak "db.tabela1.pole1==db.tabela2.pole2" oznacza SQL JOIN.',
'Update:': 'Uaktualnij:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'U\xc5\xbcyj (...)&(...) jako AND, (...)|(...) jako OR oraz ~(...) jako NOT do tworzenia bardziej skomplikowanych zapyta\xc5\x84.',
'View': 'View',
'Welcome %s': 'Welcome %s',
'Welcome to web2py': 'Witaj w web2py',
'appadmin is disabled because insecure channel': 'appadmin is disabled because insecure channel',
'cache': 'cache',
'change password': 'change password',
'Online examples': 'Kliknij aby przej\xc5\x9b\xc4\x87 do interaktywnych przyk\xc5\x82ad\xc3\xb3w',
'Administrative interface': 'Kliknij aby przej\xc5\x9b\xc4\x87 do panelu administracyjnego',
'customize me!': 'dostosuj mnie!',
'data uploaded': 'dane wys\xc5\x82ane',
'database': 'baza danych',
'database %s select': 'wyb\xc3\xb3r z bazy danych %s',
'db': 'baza danych',
'design': 'projektuj',
'done!': 'zrobione!',
'edit profile': 'edit profile',
'export as csv file': 'eksportuj jako plik csv',
'insert new': 'wstaw nowy rekord tabeli',
'insert new %s': 'wstaw nowy rekord do tabeli %s',
'invalid request': 'B\xc5\x82\xc4\x99dne \xc5\xbc\xc4\x85danie',
'login': 'login',
'logout': 'logout',
'new record inserted': 'nowy rekord zosta\xc5\x82 wstawiony',
'next 100 rows': 'nast\xc4\x99pne 100 wierszy',
'or import from csv file': 'lub zaimportuj z pliku csv',
'previous 100 rows': 'poprzednie 100 wierszy',
'record': 'record',
'record does not exist': 'rekord nie istnieje',
'record id': 'id rekordu',
'register': 'register',
'selected': 'wybranych',
'state': 'stan',
'table': 'tabela',
'unable to parse csv file': 'nie mo\xc5\xbcna sparsowa\xc4\x87 pliku csv',
}
==== config.js ====

var path = require('path'),
    fs = require('fs'),
    Source = require(hexo.lib_dir + '/core/source'),
    config_dir = path.dirname(hexo.configfile),
    config = hexo.config;

function testver() {
    var ver = hexo.env.version.split('.');
    var test = true;
    if (ver[0] < 2) test = false;
    else if (ver[0] == 2 && ver[1] < 5) test = false;
    if (test) return;
    var hexo_curver = 'hexo'.red + (' V' + hexo.env.version).green;
    var theme_curver = 'chenall'.green + ' V2.2'.red;
    var error = 'Current version of ' + hexo_curver + ' does not apply to the theme ' + theme_curver;
    error += ', please use theme ' + 'chenall '.green + 'V1.0'.red + ' or upgrade to' + ' hexo 2.5.0 '.green + 'or latest.';
    error += '\n\n\t当前版本 ' + hexo_curver + ' 不适用于主题 ' + theme_curver + '\n请使用' + 'chenall '.green + 'V1.0'.red + ' 版主题或升级hexo到' + ' 2.5.0 '.green + '以上';
    error += '\n\nchenall V1.0:\n' + 'svn co https://github.com/chenall/hexo-theme-chenall/tags/V1.0 themes/chenall'.yellow;
    error += '\n\nhexo latest(升级):\n\t' + 'npm update hexo'.yellow;
    // The package spec below was mangled by e-mail obfuscation in the dump;
    // "hexo@2.5.x" is reconstructed from the surrounding "V2.5.X" label.
    error += '\n\nhexo V2.5.X(安装指定版本):\n\t' + 'npm install hexo@2.5.x -g'.yellow;
    error += '\n\n\t有什么疑问可以联系我 http://chenall.net';
    hexo.log.e(error);
    process.exit(1);
}

function checkenv() {
    var store = hexo.extend.renderer.store;
    var error = '';
    if (!store['ejs']) error += '\tnpm install hexo-renderer-ejs\n';
    if (!store['md']) error += '\tnpm install hexo-renderer-marked\n';
    if (!store['styl']) error += '\tnpm install hexo-renderer-stylus\n';
    if (error) {
        hexo.log.e('\t主题使用环境检测失败\n\n\t缺少必要插件,请使用以下命令安装:\n\n', error);
        process.exit(1);
    }
}

testver();
checkenv();

if (hexo.hasOwnProperty('env') && hexo.env.hasOwnProperty('debug'))
    hexo.debug = hexo.env.debug;

hexo.__dump = function (obj) {
    var cache = [];
    return JSON.stringify(obj, function (key, value) {
        if (typeof value === 'object' && value !== null) {
            if (cache.indexOf(value) !== -1) {
                // Circular reference found, discard key
                return;
            }
            cache.push(value);
        }
        return value;
    });
}

if (config.CustomDir && typeof(config.CustomDir) == 'object') {
    var joinPath = function () {
        var str = path.join.apply(this, arguments);
        if (str[str.length - 1] !== path.sep) str += path.sep;
        return str;
    };
    var custom = config.CustomDir;
    ['public_dir', 'source_dir', 'scaffold_dir'].forEach(function (p) {
        if (!custom[p]) return;
        if (custom[p] == 'auto') {
            hexo.constant(p, joinPath(config_dir, p));
        } else {
            var test = custom[p].match(/^:config(.*)$/);
            if (test) {
                hexo.constant(p, joinPath(config_dir, test[1]));
            } else {
                hexo.constant(p, joinPath(config_dir, custom[p]));
            }
        }
    })
    hexo.source = new Source();
}

var load_default_usercfg = function () {
    var cfg = global.usercfg = {
        ajax_widgets: true,
        updated: true,
        cached_widgets: true
    };
    cfg.twbs_style = ['primary', 'success', 'info', 'warning', 'danger'];
    var user_cfg = hexo.source_dir + '_' + hexo.config.theme + '.yml';
    if (!fs.existsSync(user_cfg)) {
        user_cfg = hexo.theme_dir + '_config.yml';
        cfg.themeconfig = hexo.render.renderSync({path: user_cfg});
        hexo.log.i("Theme config file: " + user_cfg.green);
    }
    cfg.twbs_sty = function (i) { return cfg.twbs_style[i % 4]; }
    hexo.log.d('Using theme ' + 'chenall V2.2'.green);
}

hexo.on('ready', load_default_usercfg);
==== test-workers.js ====

self.postMessage(0);

self.addEventListener('message', function (msg) {
  if (msg.data === 'Hello') {
    self.postMessage(1);
  } else if (msg.data instanceof self.ArrayBuffer) {
    var view = new Int32Array(msg.data);
    if (view[0] === 2) {
      self.postMessage(3);
    }
  }
});
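A hypothetical main-thread harness for the worker above (the file name is assumed):

// Spawn the worker and exercise both message branches.
var worker = new Worker("test-workers.js");
var buf = new ArrayBuffer(4);
new Int32Array(buf)[0] = 2;

worker.onmessage = function (msg) {
  console.log("worker said:", msg.data); // expect 0, then 1, then 3
};
worker.postMessage("Hello");
worker.postMessage(buf, [buf]); // transfer the buffer to the worker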
==== DataSourceServiceTest.java ====

/**
 *
 */
package br.com.geocab.tests.service;

import org.junit.Assert;
import org.junit.Test;
import org.springframework.beans.factory.annotation.Autowired;

import br.com.geocab.domain.entity.datasource.DataSource;
import br.com.geocab.domain.service.DataSourceService;
import br.com.geocab.tests.AbstractIntegrationTest;

import com.github.springtestdbunit.annotation.DatabaseOperation;
import com.github.springtestdbunit.annotation.DatabaseSetup;

/**
 * @author Lucas
 */
public class DataSourceServiceTest extends AbstractIntegrationTest
{
	@Autowired
	private DataSourceService dataSourceService;

	@Test
	@DatabaseSetup(type=DatabaseOperation.INSERT, value={
		"/dataset/AccountDataSet.xml"
	})
	public void insertDataSouce()
	{
		this.authenticate(100L);

		DataSource dataSource = new DataSource();
		dataSource.setName("Data Source");
		dataSource.setLogin("user");
		dataSource.setPassword("password123");
		// dataSource.setInternal(true);
		dataSource.setUrl("url1");

		dataSource = dataSourceService.insertDataSource(dataSource);

		Assert.assertNotNull(dataSource);
		Assert.assertEquals("Data Source", dataSource.getName());
		Assert.assertEquals("user", dataSource.getLogin());
		Assert.assertEquals("password123", dataSource.getPassword());
		Assert.assertEquals("url1", dataSource.getUrl());
		// Assert.assertTrue(dataSource.getInternal());
	}

	@Test
	@DatabaseSetup(type=DatabaseOperation.INSERT, value={
		"/dataset/DataSourceDataSet.xml",
		"/dataset/AccountDataSet.xml"
	})
	public void updateDataSource() throws Exception
	{
		this.authenticate(100L);

		DataSource dataSource = dataSourceService.findDataSourceById(100L);
		dataSource.setName("Data Source changed");
		dataSource.setLogin("user changed");
		dataSource.setPassword("password123 changed");
		dataSource.setUrl("url1 changed");
		// dataSource.setInternal(false);

		dataSource = dataSourceService.updateDataSource(dataSource);

		Assert.assertNotNull(dataSource);
		Assert.assertEquals("Data Source changed", dataSource.getName());
		Assert.assertEquals("user changed", dataSource.getLogin());
		Assert.assertEquals("password123 changed", dataSource.getPassword());
		Assert.assertEquals("url1 changed", dataSource.getUrl());
		// Assert.assertFalse(dataSource.getInternal());
	}

	@Test
	@DatabaseSetup(type=DatabaseOperation.INSERT, value={
		"/dataset/DataSourceDataSet.xml",
		"/dataset/AccountDataSet.xml"
	})
	public void findFonteDadosById() throws Exception
	{
		this.authenticate(100L);

		DataSource dataSource = dataSourceService.findDataSourceById(100L);

		Assert.assertNotNull(dataSource);
		Assert.assertTrue(dataSource.getId().equals(100L));
	}

	@Test
	@DatabaseSetup(type=DatabaseOperation.INSERT, value={
		"/dataset/DataSourceDataSet.xml",
		"/dataset/AccountDataSet.xml"
	})
	public void removeDataSource()
	{
		this.authenticate(100L);
		this.dataSourceService.removeDataSource(100L);
	}
}
==== receipt.rs ====

// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity.  If not, see <http://www.gnu.org/licenses/>.

//! Receipt

use util::{H256, U256, Address};
use util::HeapSizeOf;
use rlp::*;
use basic_types::LogBloom;
use header::BlockNumber;
use log_entry::{LogEntry, LocalizedLogEntry};

/// Information describing execution of a transaction.
#[derive(Default, Debug, Clone, Binary)]
pub struct Receipt {
    /// The state root after executing the transaction.
    pub state_root: H256,
    /// The total gas used in the block following execution of the transaction.
    pub gas_used: U256,
    /// The OR-wide combination of all logs' blooms for this transaction.
    pub log_bloom: LogBloom,
    /// The logs stemming from this transaction.
    pub logs: Vec<LogEntry>,
}

impl Receipt {
    /// Create a new receipt.
    pub fn new(state_root: H256, gas_used: U256, logs: Vec<LogEntry>) -> Receipt {
        Receipt {
            state_root: state_root,
            gas_used: gas_used,
            log_bloom: logs.iter().fold(LogBloom::default(), |mut b, l| { b = &b | &l.bloom(); b }), //TODO: use |= operator
            logs: logs,
        }
    }
}

impl Encodable for Receipt {
    fn rlp_append(&self, s: &mut RlpStream) {
        s.begin_list(4);
        s.append(&self.state_root);
        s.append(&self.gas_used);
        s.append(&self.log_bloom);
        s.append(&self.logs);
    }
}

impl Decodable for Receipt {
    fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
        let d = decoder.as_rlp();
        let receipt = Receipt {
            state_root: try!(d.val_at(0)),
            gas_used: try!(d.val_at(1)),
            log_bloom: try!(d.val_at(2)),
            logs: try!(d.val_at(3)),
        };
        Ok(receipt)
    }
}

impl HeapSizeOf for Receipt {
    fn heap_size_of_children(&self) -> usize {
        self.logs.heap_size_of_children()
    }
}

/// Receipt with additional info.
#[derive(Debug, Clone, PartialEq, Binary)]
pub struct RichReceipt {
    /// Transaction hash.
    pub transaction_hash: H256,
    /// Transaction index.
    pub transaction_index: usize,
    /// The total gas used in the block following execution of the transaction.
    pub cumulative_gas_used: U256,
    /// The gas used in the execution of the transaction. Note the difference of meaning to `Receipt::gas_used`.
    pub gas_used: U256,
    /// Contract address.
    pub contract_address: Option<Address>,
    /// Logs
    pub logs: Vec<LogEntry>,
    /// Logs bloom
    pub log_bloom: LogBloom,
    /// State root
    pub state_root: H256,
}

/// Receipt with additional info.
#[derive(Debug, Clone, PartialEq, Binary)]
pub struct LocalizedReceipt {
    /// Transaction hash.
    pub transaction_hash: H256,
    /// Transaction index.
    pub transaction_index: usize,
    /// Block hash.
    pub block_hash: H256,
    /// Block number.
    pub block_number: BlockNumber,
    /// The total gas used in the block following execution of the transaction.
    pub cumulative_gas_used: U256,
    /// The gas used in the execution of the transaction. Note the difference of meaning to `Receipt::gas_used`.
    pub gas_used: U256,
    /// Contract address.
    pub contract_address: Option<Address>,
    /// Logs
    pub logs: Vec<LocalizedLogEntry>,
    /// Logs bloom
    pub log_bloom: LogBloom,
    /// State root
    pub state_root: H256,
}

#[test]
fn test_basic() {
    let expected = ::rustc_serialize::hex::FromHex::from_hex("f90162a02f697d671e9ae4ee24a43c4b0d7e15f1cb4ba6de1561120d43b9a4e8c4a8a6ee83040caeb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000f838f794dcf421d093428b096ca501a7cd1a740855a7976fc0a00000000000000000000000000000000000000000000000000000000000000000").unwrap();
    let r = Receipt::new(
        "2f697d671e9ae4ee24a43c4b0d7e15f1cb4ba6de1561120d43b9a4e8c4a8a6ee".into(),
        0x40cae.into(),
        vec![LogEntry {
            address: "dcf421d093428b096ca501a7cd1a740855a7976f".into(),
            topics: vec![],
            data: vec![0u8; 32]
        }]
    );
    assert_eq!(&encode(&r)[..], &expected[..]);
}
==== lib.rs ====

#[macro_use]
extern crate rustfbp;

agent! {
  input(input: any),
  output(output: any),
  option(any),
  fn run(&mut self) -> Result<Signal> {
    debug!("{:?}", env!("CARGO_PKG_NAME"));
    let opt = self.recv_option();
    let mut msg_input = try!(self.input.input.recv());
    msg_input.vec = opt.vec.clone();
    msg_input.action = opt.action.clone();
    try!(self.output.output.send(msg_input));
    Ok(End)
  }
}
==== test_triggersqls.py ====

"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.

This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mpp.models import SQLTestCase

'''
Trigger sqls for create_tests
'''


class TestTriggerSQLClass(SQLTestCase):
    '''
    This class contains all the sqls that are part of the trigger phase
    The sqls in here will get suspended by one of the faults that are triggered in the main run
    @gpdiff False
    '''
    sql_dir = 'sql/'

    @classmethod
    def setUpClass(cls):
        """
        Since some operation in this scenario is blocked, we want to run
        only aimed SQLs without unexpected setup. Just make this no-op.
        """
        pass
==== models.py ====

from __future__ import unicode_literals

from django.db import models

# Create your models here.


class TimeStampedModel(models.Model):
    """
    An abstract base class model that provides self-updating
    "created" and "modified" fields.
    """
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    class Meta:
        abstract = True
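A hypothetical concrete model showing the intended use; any subclass inherits the timestamp fields without redeclaring them:

# "Article" and its field are illustrative, not part of the original app.
class Article(TimeStampedModel):
    title = models.CharField(max_length=200)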
==== gen_overlay_widgets.py ====

#! /usr/bin/python
# Copyright 2019 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# gen_overlay_widgets.py:
#   Code generation for overlay widgets.  Should be run when the widgets declaration file,
#   overlay_widgets.json, is changed.
# NOTE: don't run this script directly. Run scripts/run_code_generation.py.

from datetime import date
import json
import sys

out_file = 'Overlay_autogen.cpp'
in_file = 'overlay_widgets.json'

template_out_file = u"""// GENERATED FILE - DO NOT EDIT.
// Generated by {script_name} using data from {input_file_name}.
//
// Copyright {copyright_year} The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// {out_file_name}:
//   Autogenerated overlay widget declarations.

#include "libANGLE/renderer/driver_utils.h"
#include "libANGLE/Overlay.h"
#include "libANGLE/OverlayWidgets.h"
#include "libANGLE/Overlay_font_autogen.h"

namespace gl
{{
using namespace overlay;

namespace
{{

int GetFontSize(int fontSize, bool largeFont)
{{
    if (largeFont && fontSize > 0)
    {{
        return fontSize - 1;
    }}
    return fontSize;
}}

}}  // anonymous namespace

void Overlay::initOverlayWidgets()
{{
    const bool kLargeFont = rx::IsAndroid();

    {init_widgets}
}}

}}  // namespace gl
"""

template_init_widget = u"""{{
    const int32_t fontSize = GetFontSize({font_size}, kLargeFont);
    const int32_t offsetX = {offset_x};
    const int32_t offsetY = {offset_y};
    const int32_t width = {width};
    const int32_t height = {height};

    widget->{subwidget}type      = WidgetType::{type};
    widget->{subwidget}fontSize  = fontSize;
    widget->{subwidget}coords[0] = {coord0};
    widget->{subwidget}coords[1] = {coord1};
    widget->{subwidget}coords[2] = {coord2};
    widget->{subwidget}coords[3] = {coord3};
    widget->{subwidget}color[0]  = {color_r};
    widget->{subwidget}color[1]  = {color_g};
    widget->{subwidget}color[2]  = {color_b};
    widget->{subwidget}color[3]  = {color_a};
}}
"""


def extract_type_and_constructor(properties):
    constructor = properties['type']
    args_separated = constructor.split('(', 1)
    if len(args_separated) == 1:
        return constructor, constructor

    type_no_constructor = args_separated[0]
    return type_no_constructor, constructor


def get_font_size_constant(properties):
    return 'kFontLayer' + properties['font'].capitalize()


def is_graph_type(type):
    return type == 'RunningGraph' or type == 'RunningHistogram'


def is_text_type(type):
    return not is_graph_type(type)


class OverlayWidget:
    def __init__(self, properties, is_graph_description=False):
        if not is_graph_description:
            self.name = properties['name']
        self.type, self.constructor = extract_type_and_constructor(properties)
        self.extract_common(properties)

        if is_graph_type(self.type):
            description_properties = properties['description']
            description_properties['type'] = 'Text'
            self.description = OverlayWidget(description_properties, True)

    def extract_common(self, properties):
        self.color = properties['color']
        self.coords = properties['coords']
        if is_graph_type(self.type):
            self.bar_width = properties['bar_width']
            self.height = properties['height']
        else:
            self.font = get_font_size_constant(properties)
            self.length = properties['length']

        self.negative_alignment = [False, False]


def is_negative_coord(coords, axis, widgets_so_far):
    if isinstance(coords[axis], unicode):
        coord_split = coords[axis].split('.')
        # The coordinate is in the form other_widget.edge.mode
        # We simply need to know if other_widget's coordinate is negative or not.
        return widgets_so_far[coord_split[0]].negative_alignment[axis]

    return coords[axis] < 0


def set_alignment_flags(overlay_widget, widgets_so_far):
    overlay_widget.negative_alignment[0] = is_negative_coord(overlay_widget.coords, 0,
                                                             widgets_so_far)
    overlay_widget.negative_alignment[1] = is_negative_coord(overlay_widget.coords, 1,
                                                             widgets_so_far)

    if is_graph_type(overlay_widget.type):
        set_alignment_flags(overlay_widget.description, widgets_so_far)


def get_offset_helper(widget, axis, smaller_coord_side):
    # Assume axis is X.  This function returns two values:
    # - An offset where the bounding box is placed at,
    # - Whether this offset is for the left or right edge.
    #
    # The input coordinate (widget.coord[axis]) is either:
    #
    # - a number: in this case, the offset is that number, and its sign determines whether this
    #   refers to the left or right edge of the bounding box.
    # - other_widget.edge.mode: this has multiple possibilities:
    #   * edge=left, mode=align: the offset is other_widget.left, the edge is left.
    #   * edge=left, mode=adjacent: the offset is other_widget.left, the edge is right.
    #   * edge=right, mode=align: the offset is other_widget.right, the edge is right.
    #   * edge=right, mode=adjacent: the offset is other_widget.right, the edge is left.
    #
    # The case for the Y axis is similar, with the edge values being top or bottom.

    coord = widget.coords[axis]
    if not isinstance(coord, unicode):
        is_left = coord >= 0
        return coord, is_left

    coord_split = coord.split('.')
    is_left = coord_split[1] == smaller_coord_side
    is_align = coord_split[2] == 'align'

    other_widget_coords = 'mState.mOverlayWidgets[WidgetId::' + coord_split[0] + ']->coords'
    other_widget_coord_index = axis + (0 if is_left else 2)
    offset = other_widget_coords + '[' + str(other_widget_coord_index) + ']'

    return offset, is_left == is_align


def get_offset_x(widget):
    return get_offset_helper(widget, 0, 'left')


def get_offset_y(widget):
    return get_offset_helper(widget, 1, 'top')


def get_bounding_box_coords(offset, width, offset_is_left, is_left_aligned):
    # See comment in generate_widget_init_helper.  This function is implementing the following:
    #
    # - offset_is_left && is_left_aligned:   [offset, offset + width]
    # - offset_is_left && !is_left_aligned:  [offset, std::min(offset + width, -1)]
    # - !offset_is_left && is_left_aligned:  [std::max(1, offset - width), offset]
    # - !offset_is_left && !is_left_aligned: [offset - width, offset]

    coord_left = offset if offset_is_left else (offset + ' - ' + width)
    coord_right = (offset + ' + ' + width) if offset_is_left else offset

    if offset_is_left and not is_left_aligned:
        coord_right = 'std::min(' + coord_right + ', -1)'
    if not offset_is_left and is_left_aligned:
        coord_left = 'std::max(' + coord_left + ', 1)'

    return coord_left, coord_right


def generate_widget_init_helper(widget, is_graph_description=False):
    font_size = '0'

    # Common attributes
    color = [channel / 255.0 for channel in widget.color]
    offset_x, offset_x_is_left = get_offset_x(widget)
    offset_y, offset_y_is_top = get_offset_y(widget)

    if is_text_type(widget.type):
        # Attributes deriven from text properties
        font_size = widget.font
        width = str(widget.length) + ' * kFontGlyphWidths[fontSize]'
        height = 'kFontGlyphHeights[fontSize]'
    else:
        # Attributes deriven from graph properties
        width = str(widget.bar_width) + ' * static_cast<uint32_t>(widget->runningValues.size())'
        height = widget.height

    is_left_aligned = not widget.negative_alignment[0]
    is_top_aligned = not widget.negative_alignment[1]

    # We have offset_x, offset_y, width and height which together determine the bounding box.  If
    # offset_x_is_left, the bounding box X would be in [offset_x, offset_x + width], otherwise it
    # would be in [offset_x - width, offset_x].  Similarly for y.  Since we use negative values to
    # mean aligned to the right side of the screen, we need to make sure that:
    #
    # - if left aligned: offset_x - width is at minimum 1
    # - if right aligned: offset_x + width is at maximum -1
    #
    # We therefore have the following combinations for the X axis:
    #
    # - offset_x_is_left && is_left_aligned:   [offset_x, offset_x + width]
    # - offset_x_is_left && !is_left_aligned:  [offset_x, std::min(offset_x + width, -1)]
    # - !offset_x_is_left && is_left_aligned:  [std::max(1, offset_x - width), offset_x]
    # - !offset_x_is_left && !is_left_aligned: [offset_x - width, offset_x]
    #
    # Similarly for y.
    coord0, coord2 = get_bounding_box_coords('offsetX', 'width', offset_x_is_left, is_left_aligned)
    coord1, coord3 = get_bounding_box_coords('offsetY', 'height', offset_y_is_top, is_top_aligned)

    return template_init_widget.format(
        subwidget='description.' if is_graph_description else '',
        offset_x=offset_x,
        offset_y=offset_y,
        width=width,
        height=height,
        type=widget.type,
        font_size=font_size,
        coord0=coord0,
        coord1=coord1,
        coord2=coord2,
        coord3=coord3,
        color_r=color[0],
        color_g=color[1],
        color_b=color[2],
        color_a=color[3])


def generate_widget_init(widget):
    widget_init = '{\n' + widget.type + ' *widget = new ' + widget.constructor + ';\n'

    widget_init += generate_widget_init_helper(widget)
    widget_init += 'mState.mOverlayWidgets[WidgetId::' + widget.name + '].reset(widget);\n'

    if is_graph_type(widget.type):
        widget_init += generate_widget_init_helper(widget.description, True)

    widget_init += '}\n'

    return widget_init


def main():
    if len(sys.argv) == 2 and sys.argv[1] == 'inputs':
        print(in_file)
        return
    if len(sys.argv) == 2 and sys.argv[1] == 'outputs':
        print(out_file)
        return

    with open(in_file) as fin:
        layout = json.loads(fin.read())

    # Read the layouts from the json file and determine alignment of widgets (as they can refer to
    # other widgets.
    overlay_widgets = {}
    for widget_properties in layout['widgets']:
        widget = OverlayWidget(widget_properties)
        overlay_widgets[widget.name] = widget
        set_alignment_flags(widget, overlay_widgets)

    # Go over the widgets again and generate initialization code.  Note that we need to iterate
    # over the widgets in order, so we can't use the overlay_widgets dictionary for iteration.
    init_widgets = []
    for widget_properties in layout['widgets']:
        init_widgets.append(generate_widget_init(overlay_widgets[widget_properties['name']]))

    with open(out_file, 'w') as outfile:
        outfile.write(
            template_out_file.format(
                script_name=__file__,
                copyright_year=date.today().year,
                input_file_name=in_file,
                out_file_name=out_file,
                init_widgets='\n'.join(init_widgets)))
        outfile.close()


if __name__ == '__main__':
    sys.exit(main())
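For context, a hypothetical overlay_widgets.json entry limited to the keys this script reads ('name', 'type', 'color', 'coords', plus the text-specific 'font' and 'length'); the widget name and values are illustrative:

{
  "widgets": [
    {
      "name": "FPS",
      "type": "PerSecond",
      "color": [255, 255, 255, 255],
      "coords": [10, 10],
      "font": "small",
      "length": 12
    }
  ]
}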
<|file_name|>test_menu.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- import curses import npyscreen from vent.helpers.paths import PathDirs from vent.menu import VentApp from vent.menus.main import MainForm npyscreen.TEST_SETTINGS['CONTINUE_AFTER_TEST_INPUT'] = False def run_menu(test_input): """ Actually run the menu and process any input """ # initialize tutorial paths = PathDirs() first_time = paths.ensure_file(paths.init_file) assert first_time[0] == True npyscreen.TEST_SETTINGS['TEST_INPUT'] = test_input A = VentApp() try: A.run(fork=False) except npyscreen.ExhaustedTestInput as e: pass def test_tools_status(): """ Test the staticmethod tools_status """ a, b = MainForm.t_status(True) assert isinstance(a, str) assert isinstance(b, tuple) def test_menu(): """ Run menu tests """ CTRL_Q = '^Q' CTRL_T = '^T' CTRL_X = '^X' CTRL_V = '^V' ENTER = curses.ascii.CR TAB = curses.ascii.TAB LEFT = curses.KEY_LEFT RIGHT = curses.KEY_RIGHT DOWN = curses.KEY_DOWN SPACE = curses.ascii.SP BACKSPACE = curses.ascii.BS # go through help menus run_menu([ENTER, CTRL_T, CTRL_X, 'b', 'm', ENTER, ENTER, CTRL_X, 'b', 'p', ENTER, ENTER, CTRL_X, 'b', 't', ENTER, ENTER, CTRL_X, 'b', 'f', ENTER, ENTER, CTRL_X, 'b', 'c', ENTER, ENTER, CTRL_X, 'b', 's', ENTER, ENTER, CTRL_X, 'p', 'a', ENTER, ENTER, CTRL_X, 'p', 'b', ENTER, ENTER, ENTER]) # go to help menu and leave again run_menu([ENTER, CTRL_T, RIGHT, ENTER]) # go through the core tools menus # install run_menu([ENTER, CTRL_X, 'c', 'i', ENTER]) # build - ok run_menu([ENTER, CTRL_X, 'c', 'b', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB, RIGHT, ENTER, ENTER, ENTER]) # build - cancel run_menu([ENTER, CTRL_X, 'c', 'b', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB, ENTER]) # build - quit back to main run_menu([ENTER, CTRL_X, 'c', 'b', CTRL_Q]) # build - toggle to main run_menu([ENTER, CTRL_X, 'c', 'b', CTRL_T]) # start - ok run_menu([ENTER, CTRL_X, 'c', 's', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB, ENTER, ENTER, ENTER, ENTER, ENTER]) # start - cancel run_menu([ENTER, CTRL_X, 'c', 's', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB, ENTER]) # start - quit back to main run_menu([ENTER, CTRL_X, 'c', 's', CTRL_Q]) # start - toggle to main run_menu([ENTER, CTRL_X, 'c', 's', CTRL_T]) # configure - cancel run_menu([ENTER, CTRL_X, 'c', 't', TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, TAB, LEFT, ENTER]) # configure - quit back to main run_menu([ENTER, CTRL_X, 'c', 't', CTRL_Q]) # configure - toggle back to main run_menu([ENTER, CTRL_X, 'c', 't', CTRL_T]) # configure - ok run_menu([ENTER, CTRL_X, 'c', 't', TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, TAB, TAB, ENTER, TAB, TAB, ENTER, ENTER, ENTER]) # configure - quit in the middle of add # run_menu([ENTER, CTRL_X, 'c', 't', SPACE, TAB, SPACE, TAB, SPACE, TAB, # SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, TAB, SPACE, TAB, # SPACE, TAB, TAB, ENTER, DOWN, DOWN, DOWN, DOWN, DOWN, DOWN, DOWN, # DOWN, DOWN, DOWN, DOWN, DOWN, LEFT, BACKSPACE, '3', TAB, TAB, # ENTER, ENTER, TAB, ENTER, ENTER, TAB, ENTER, CTRL_Q]) # configure - instances add (add an instance of rq_worker) # run_menu([ENTER, CTRL_X, 'c', 't', SPACE, TAB, SPACE, TAB, SPACE, TAB, # SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, TAB, SPACE, TAB, # SPACE, TAB, TAB, ENTER, DOWN, DOWN, DOWN, DOWN, DOWN, DOWN, DOWN, # DOWN, DOWN, DOWN, DOWN, DOWN, LEFT, BACKSPACE, '3', TAB, TAB, # ENTER, ENTER, TAB, ENTER, ENTER, TAB, ENTER, TAB, TAB, ENTER]) # configure - quit 
# in the middle of delete
# run_menu([ENTER, CTRL_X, 'c', 't', SPACE, TAB, SPACE, TAB, SPACE, TAB,
#           SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, TAB, SPACE, TAB,
#           SPACE, TAB, SPACE, TAB, TAB, ENTER, DOWN, DOWN, DOWN, DOWN, DOWN,
#           DOWN, DOWN, DOWN, DOWN, DOWN, DOWN, DOWN, LEFT, BACKSPACE, '2',
#           TAB, TAB, ENTER, ENTER, TAB, ENTER, CTRL_Q])
# configure - instances delete (delete an instance of file_drop)
# run_menu([ENTER, CTRL_X, 'c', 't', SPACE, TAB, SPACE, TAB, SPACE, TAB,
#           SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, TAB, SPACE, TAB,
#           SPACE, TAB, SPACE, TAB, TAB, ENTER, DOWN, DOWN, DOWN, DOWN, DOWN,
#           DOWN, DOWN, DOWN, DOWN, DOWN, DOWN, DOWN, LEFT, BACKSPACE, '2',
#           TAB, TAB, ENTER, ENTER, TAB, ENTER, SPACE, TAB, TAB, ENTER])
# clean - ok
run_menu([ENTER, CTRL_X, 'c', 'c', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
          RIGHT, ENTER, ENTER, ENTER])
# clean - cancel
run_menu([ENTER, CTRL_X, 'c', 'c', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
          ENTER])
# clean - quit back to main
run_menu([ENTER, CTRL_X, 'c', 'c', CTRL_Q])
# clean - toggle to main
run_menu([ENTER, CTRL_X, 'c', 'c', CTRL_T])
# inventory - quit back to main
run_menu([ENTER, CTRL_X, 'c', 'v', CTRL_Q])
# inventory - toggle to main
run_menu([ENTER, CTRL_X, 'c', 'v', CTRL_T])
# inventory - toggle group view
run_menu([ENTER, CTRL_X, 'c', 'v', CTRL_V, CTRL_V, CTRL_V, CTRL_V, CTRL_V,
          CTRL_V, CTRL_V, CTRL_V, CTRL_T])
run_menu([ENTER, CTRL_X, 'c', 's', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
          RIGHT, ENTER, ENTER, ENTER, ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'c', 's', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
          ENTER])
run_menu([ENTER, CTRL_X, 'c', 's', CTRL_Q])
run_menu([ENTER, CTRL_X, 'c', 's', CTRL_T])
# services running - core services
run_menu([ENTER, CTRL_X, 's', 'c', CTRL_T])
# services running - external services
run_menu([ENTER, CTRL_X, 's', 'e', CTRL_T])
run_menu([ENTER, CTRL_X, 'c', 'p', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
          RIGHT, ENTER, ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'c', 'p', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
          ENTER])
run_menu([ENTER, CTRL_X, 'c', 'p', CTRL_Q])
run_menu([ENTER, CTRL_X, 'c', 'p', CTRL_T])
run_menu([ENTER, CTRL_X, 'c', 'u', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
          RIGHT, ENTER, ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'c', 'u', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
          ENTER])
run_menu([ENTER, CTRL_X, 'c', 'u', CTRL_Q])
run_menu([ENTER, CTRL_X, 'c', 'u', CTRL_T])
run_menu([ENTER, CTRL_X, 'c', 'r', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
          RIGHT, ENTER, ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'c', 'r', ENTER])
run_menu([ENTER, CTRL_X, 'c', 'r', CTRL_Q])
run_menu([ENTER, CTRL_X, 'c', 'r', CTRL_T])
run_menu([ENTER, CTRL_X, 'c', 't', TAB, ENTER, ENTER, ENTER])
# go through the plugins menus
run_menu([ENTER, CTRL_X, 'p', 'a', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
          RIGHT, ENTER, SPACE, TAB, TAB, TAB, TAB, TAB, TAB, TAB, ENTER,
          SPACE, TAB, SPACE, TAB, SPACE, TAB, TAB, SPACE, TAB, SPACE, TAB,
          TAB, ENTER, ENTER, ENTER])
cmds = [ENTER, CTRL_X, 'p', 'a', TAB, TAB, TAB, 'alpine', TAB, TAB, TAB,
        TAB, TAB, TAB, ENTER, ENTER, ENTER]
cmds += (43 * [BACKSPACE])
cmds += [TAB, TAB, TAB, BACKSPACE, BACKSPACE, BACKSPACE, BACKSPACE,
         BACKSPACE, BACKSPACE, TAB, TAB, TAB, TAB, TAB, TAB, ENTER, ENTER,
         ENTER, CTRL_Q]
run_menu(cmds)
run_menu([ENTER, CTRL_X, 'p', 'a', TAB, TAB, TAB, 'alpine', TAB, 'alpine',
          TAB, TAB, TAB, TAB, TAB, ENTER, ENTER, ENTER, TAB, TAB, ENTER,
          ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'p', 'a', CTRL_T, CTRL_T, TAB, TAB, TAB, TAB, TAB,
          TAB, TAB, TAB, ENTER])
run_menu([ENTER, CTRL_X, 'p', 'b', TAB, TAB,
          RIGHT, ENTER, ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'p', 'b', TAB, TAB, ENTER])
run_menu([ENTER, CTRL_X, 'p', 'b', CTRL_Q])
run_menu([ENTER, CTRL_X, 'p', 'b', CTRL_T])
run_menu([ENTER, CTRL_X, 'p', 'c', TAB, TAB, RIGHT, ENTER, ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'p', 'c', TAB, TAB, ENTER])
run_menu([ENTER, CTRL_X, 'p', 'c', CTRL_Q])
run_menu([ENTER, CTRL_X, 'p', 'c', CTRL_T])
run_menu([ENTER, CTRL_X, 'p', 'i', CTRL_T])
run_menu([ENTER, CTRL_X, 'p', 's', TAB, TAB, RIGHT, ENTER, ENTER, ENTER,
          ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'p', 's', TAB, TAB, ENTER])
run_menu([ENTER, CTRL_X, 'p', 's', CTRL_Q])
run_menu([ENTER, CTRL_X, 'p', 's', CTRL_T])
# services running - plugin services
run_menu([ENTER, CTRL_X, 's', 'p', CTRL_T])
run_menu([ENTER, CTRL_X, 'p', 'p', RIGHT, ENTER, ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'p', 'p', ENTER])
run_menu([ENTER, CTRL_X, 'p', 'p', CTRL_Q])
run_menu([ENTER, CTRL_X, 'p', 'p', CTRL_T])
run_menu([ENTER, CTRL_X, 'p', 'u', TAB, TAB, RIGHT, ENTER, ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'p', 'u', TAB, TAB, ENTER])
run_menu([ENTER, CTRL_X, 'p', 'u', CTRL_Q])
run_menu([ENTER, CTRL_X, 'p', 'u', CTRL_T])
run_menu([ENTER, CTRL_X, 'p', 'r', TAB, TAB, RIGHT, ENTER, ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'p', 'r', ENTER])
run_menu([ENTER, CTRL_X, 'p', 'r', CTRL_Q])
run_menu([ENTER, CTRL_X, 'p', 'r', CTRL_T])
# go through the logs menus
run_menu([ENTER, CTRL_X, 'l', DOWN, ENTER, CTRL_T])
run_menu([ENTER, CTRL_X, 'l', DOWN, ENTER, CTRL_Q])
# go through the services running menus
run_menu([ENTER, CTRL_X, 's', 'c', CTRL_T])
run_menu([ENTER, CTRL_X, 's', 'e', CTRL_T])
run_menu([ENTER, CTRL_X, 's', 'p', CTRL_T])
# go through the system commands menus
# causes .coverage file to not exist
# run_menu([ENTER, CTRL_X, DOWN, DOWN, DOWN, DOWN, ENTER, 'r', TAB, RIGHT,
#           ENTER, ENTER, ENTER])
# system commands - backup
run_menu([ENTER, CTRL_X, 'y', 'b', ENTER, ENTER])
# system commands - configure - cancel
run_menu([ENTER, CTRL_X, 'y', 'c', TAB, ENTER, ENTER, ENTER])
# system commands - configure - ok
run_menu([ENTER, CTRL_X, 'y', 'c', TAB, TAB, ENTER, ENTER, ENTER])
run_menu([ENTER, CTRL_X, DOWN, DOWN, DOWN, DOWN, ENTER, 'g', ENTER, ENTER])
run_menu([ENTER, CTRL_X, DOWN, DOWN, DOWN, DOWN, ENTER, 's'])
run_menu([ENTER, CTRL_X, DOWN, DOWN, DOWN, DOWN, ENTER, 'u'])
run_menu([ENTER, CTRL_X, DOWN, DOWN, DOWN, DOWN, ENTER, 'b', ENTER, ENTER])
run_menu([ENTER, CTRL_X, DOWN, DOWN, DOWN, DOWN, ENTER, 't', SPACE, TAB,
          ENTER])
run_menu([ENTER, CTRL_X, DOWN, DOWN, DOWN, DOWN, ENTER, 't', SPACE, TAB,
          TAB, ENTER, ENTER, ENTER])
# system commands - network tap interface - create
run_menu([ENTER, CTRL_X, 'y', 'n', 'c', 'lo', TAB, 'foo', TAB, '5', TAB,
          TAB, '1', TAB, TAB, ENTER, ENTER, ENTER, TAB, TAB, TAB, TAB, TAB,
          ENTER])
# system commands - network tap interface - nics
run_menu([ENTER, CTRL_X, 'y', 'n', 'n', TAB, ENTER])
run_menu([ENTER, CTRL_X, 'y', 'n', 'n', TAB, TAB, ENTER])
run_menu([ENTER, CTRL_X, 'y', 'n', 'n', CTRL_T])
# go through the tutorials menus
run_menu([ENTER, CTRL_X, 't', 'v', 'b', RIGHT, ENTER])<|fim▁hole|>
run_menu([ENTER, CTRL_X, 't', 'c', 'b', RIGHT, ENTER])
run_menu([ENTER, CTRL_X, 't', 'c', 'c', RIGHT, ENTER])
run_menu([ENTER, CTRL_X, 't', 'p', 'a', RIGHT, ENTER])
run_menu([ENTER, CTRL_X, 't', 'f', 'a', RIGHT, ENTER])
run_menu([ENTER, CTRL_X, 't', 's', 's', RIGHT, ENTER])
# exit
# causes .coverage file to not exist
# run_menu([ENTER, CTRL_Q])
# extra complete run
run_menu([ENTER, CTRL_X, 'c', 'i', ENTER, ENTER, CTRL_X, 'c', 'b', TAB, TAB,
          TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB, ENTER, ENTER, ENTER, ENTER,
          CTRL_X, 'c', 's', ENTER, ENTER, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
          TAB, TAB, TAB, ENTER, ENTER, ENTER, ENTER, ENTER, ENTER, CTRL_X,
          'p', 'a', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB, ENTER,
          SPACE, TAB, TAB, TAB, TAB, ENTER, TAB, TAB, TAB, TAB, TAB, TAB,
          TAB, TAB, ENTER, ENTER, ENTER, CTRL_X, 's', 'c', CTRL_T])<|fim▁end|>
run_menu([ENTER, CTRL_X, 't', 'v', 't', RIGHT, ENTER])
run_menu([ENTER, CTRL_X, 't', 'v', 's', RIGHT, ENTER])
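The run_menu helper and the key constants the sequences above rely on are defined outside this row. A minimal sketch of what such a harness might look like — every name, key code, and behavior below is an assumption for illustration, not the project's actual implementation:

import curses

# Hypothetical key constants: printable keys are one-character strings,
# control keys are their ASCII control codes, arrows come from curses.
ENTER, TAB, SPACE = '\r', '\t', ' '
DOWN, LEFT, RIGHT = curses.KEY_DOWN, curses.KEY_LEFT, curses.KEY_RIGHT
BACKSPACE = curses.KEY_BACKSPACE
CTRL_Q, CTRL_T, CTRL_V, CTRL_X = '\x11', '\x14', '\x16', '\x18'


def expand(keys):
    # Multi-character strings such as 'alpine' or 'lo' are typed one
    # character at a time; single keys pass through unchanged.
    for key in keys:
        if isinstance(key, str) and len(key) > 1:
            yield from key
        else:
            yield key


def run_menu(keys):
    # A real harness would feed expand(keys) into the TUI's input loop
    # (for example by stubbing the curses screen's getch) and let one
    # fresh instance of the menu app run the scripted session to the end.
    script = list(expand(keys))
    assert script, 'every scripted session presses at least one key'
    return script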
<|file_name|>Operar_unit.java<|end_file_name|><|fim▁begin|>package testtravis;

import static org.junit.Assert.*;

import org.junit.Test;

public class Operar_unit {

    @Test
    public void testSumar() {
        System.out.println("Sumar dos numeros");
        int numero1 = 6;
        int numero2 = 6;
        Operaciones instance = new Operaciones();
        int expResult = 12;
        int result = instance.sumar(numero1, numero2);
        assertEquals(expResult, result);
<|fim▁hole|>
    }

    @Test
    public void testRestar() {
        System.out.println("Restar dos numeros");
        int numero1 = 4;
        int numero2 = 2;
        Operaciones instance = new Operaciones();
        int expResult = 2;
        int result = instance.restar(numero1, numero2);
        assertEquals(expResult, result);
    }

    @Test
    public void testMultiplicar() {
        System.out.println("Multiplicar dos numeros");
        int numero1 = 3;
        int numero2 = 3;
        Operaciones instance = new Operaciones();
        int expResult = 9;
        int result = instance.multiplicar(numero1, numero2);
        assertEquals(expResult, result);
    }

    @Test
    public void testDividir() {
        System.out.println("Dividir Dos numeros");
        int numero1 = 6;
        int numero2 = 3;
        Operaciones instance = new Operaciones();
        int expResult = 2;
        int result = instance.dividir(numero1, numero2);
        assertEquals(expResult, result);
    }
}<|fim▁end|>
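The Operaciones class exercised by these tests is not part of this row. A minimal sketch consistent with the four assertions above (sumar(6, 6) == 12, restar(4, 2) == 2, multiplicar(3, 3) == 9, dividir(6, 3) == 2) — only the method names and int signatures are implied by the tests, the rest is an assumption:

package testtravis;

public class Operaciones {

    // Integer arithmetic only, matching the int-based JUnit assertions.
    public int sumar(int a, int b) {
        return a + b;
    }

    public int restar(int a, int b) {
        return a - b;
    }

    public int multiplicar(int a, int b) {
        return a * b;
    }

    public int dividir(int a, int b) {
        // Integer division: 6 / 3 == 2 in the test. Division by zero would
        // throw ArithmeticException, a case the tests above do not cover.
        return a / b;
    }
}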
<|file_name|>storage_v1_messages.py<|end_file_name|><|fim▁begin|>#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Generated message classes for storage version v1.

Stores and retrieves potentially large, immutable data objects.
"""
# NOTE: This file is autogenerated and should not be edited by hand.

from apitools.base.protorpclite import message_types as _message_types
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types


package = 'storage'


class Bucket(_messages.Message):
  """A bucket.

  Messages:
    CorsValueListEntry: A CorsValueListEntry object.
    LifecycleValue: The bucket's lifecycle configuration. See lifecycle
      management for more information.
    LoggingValue: The bucket's logging configuration, which defines the
      destination bucket and optional name prefix for the current bucket's
      logs.
    OwnerValue: The owner of the bucket. This is always the project team's
      owner group.
    VersioningValue: The bucket's versioning configuration.
    WebsiteValue: The bucket's website configuration.

  Fields:
    acl: Access controls on the bucket.
    cors: The bucket's Cross-Origin Resource Sharing (CORS) configuration.
    defaultObjectAcl: Default access controls to apply to new objects when no
      ACL is provided.
    etag: HTTP 1.1 Entity tag for the bucket.
    id: The ID of the bucket.
    kind: The kind of item this is. For buckets, this is always
      storage#bucket.
    lifecycle: The bucket's lifecycle configuration. See lifecycle management
      for more information.
    location: The location of the bucket. Object data for objects in the
      bucket resides in physical storage within this region. Defaults to US.
      See the developer's guide for the authoritative list.
    logging: The bucket's logging configuration, which defines the
      destination bucket and optional name prefix for the current bucket's
      logs.
    metageneration: The metadata generation of this bucket.
    name: The name of the bucket.
    owner: The owner of the bucket. This is always the project team's owner
      group.
    projectNumber: The project number of the project the bucket belongs to.
    selfLink: The URI of this bucket.
    storageClass: The bucket's storage class. This defines how objects in the
      bucket are stored and determines the SLA and the cost of storage.
      Values include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY.
      Defaults to STANDARD. For more information, see storage classes.
    timeCreated: The creation time of the bucket in RFC 3339 format.
    updated: The modification time of the bucket in RFC 3339 format.
    versioning: The bucket's versioning configuration.
    website: The bucket's website configuration.
  """

  class CorsValueListEntry(_messages.Message):
    """A CorsValueListEntry object.
Fields: maxAgeSeconds: The value, in seconds, to return in the Access-Control- Max-Age header used in preflight responses. method: The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list of methods, and means "any method". origin: The list of Origins eligible to receive CORS response headers. Note: "*" is permitted in the list of origins, and means "any Origin". responseHeader: The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains. """ maxAgeSeconds = _messages.IntegerField(1, variant=_messages.Variant.INT32) method = _messages.StringField(2, repeated=True) origin = _messages.StringField(3, repeated=True) responseHeader = _messages.StringField(4, repeated=True) class LifecycleValue(_messages.Message): """The bucket's lifecycle configuration. See lifecycle management for more information. Messages: RuleValueListEntry: A RuleValueListEntry object. Fields: rule: A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken. """ class RuleValueListEntry(_messages.Message): """A RuleValueListEntry object. Messages: ActionValue: The action to take. ConditionValue: The condition(s) under which the action will be taken. Fields: action: The action to take. condition: The condition(s) under which the action will be taken. """ class ActionValue(_messages.Message): """The action to take. Fields: type: Type of the action. Currently, only Delete is supported. """ type = _messages.StringField(1) class ConditionValue(_messages.Message): """The condition(s) under which the action will be taken. Fields: age: Age of an object (in days). This condition is satisfied when an object reaches the specified age. createdBefore: A date in RFC 3339 format with only the date part (for instance, "2013-01-15"). This condition is satisfied when an object is created before midnight of the specified date in UTC. isLive: Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects. numNewerVersions: Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object. """ age = _messages.IntegerField(1, variant=_messages.Variant.INT32) createdBefore = extra_types.DateField(2) isLive = _messages.BooleanField(3) numNewerVersions = _messages.IntegerField(4, variant=_messages.Variant.INT32) action = _messages.MessageField('ActionValue', 1) condition = _messages.MessageField('ConditionValue', 2) rule = _messages.MessageField('RuleValueListEntry', 1, repeated=True) class LoggingValue(_messages.Message): """The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs. Fields: logBucket: The destination bucket where the current bucket's logs should be placed. logObjectPrefix: A prefix for log object names. """ logBucket = _messages.StringField(1) logObjectPrefix = _messages.StringField(2) class OwnerValue(_messages.Message): """The owner of the bucket. This is always the project team's owner group. Fields: entity: The entity, in the form project-owner-projectId. entityId: The ID for the entity. """ entity = _messages.StringField(1) entityId = _messages.StringField(2) class VersioningValue(_messages.Message): """The bucket's versioning configuration. 
Fields: enabled: While set to true, versioning is fully enabled for this bucket. """ enabled = _messages.BooleanField(1) class WebsiteValue(_messages.Message): """The bucket's website configuration. Fields: mainPageSuffix: Behaves as the bucket's directory index where missing objects are treated as potential directories. notFoundPage: The custom object to return when a requested resource is not found. """ mainPageSuffix = _messages.StringField(1) notFoundPage = _messages.StringField(2) acl = _messages.MessageField('BucketAccessControl', 1, repeated=True) cors = _messages.MessageField('CorsValueListEntry', 2, repeated=True) defaultObjectAcl = _messages.MessageField('ObjectAccessControl', 3, repeated=True) etag = _messages.StringField(4) id = _messages.StringField(5) kind = _messages.StringField(6, default=u'storage#bucket') lifecycle = _messages.MessageField('LifecycleValue', 7) location = _messages.StringField(8) logging = _messages.MessageField('LoggingValue', 9) metageneration = _messages.IntegerField(10) name = _messages.StringField(11) owner = _messages.MessageField('OwnerValue', 12) projectNumber = _messages.IntegerField(13, variant=_messages.Variant.UINT64) selfLink = _messages.StringField(14) storageClass = _messages.StringField(15) timeCreated = _message_types.DateTimeField(16) updated = _message_types.DateTimeField(17) versioning = _messages.MessageField('VersioningValue', 18) website = _messages.MessageField('WebsiteValue', 19) class BucketAccessControl(_messages.Message): """An access-control entry. Messages: ProjectTeamValue: The project team associated with the entity, if any. Fields: bucket: The name of the bucket. domain: The domain associated with the entity, if any. email: The email address associated with the entity, if any. entity: The entity holding the permission, in one of the following forms: - user-userId - user-email - group-groupId - group-email - domain- domain - project-team-projectId - allUsers - allAuthenticatedUsers Examples: - The user [email protected] would be [email protected]. - The group [email protected] would be group- [email protected]. - To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com. entityId: The ID for the entity, if any. etag: HTTP 1.1 Entity tag for the access-control entry. id: The ID of the access-control entry. kind: The kind of item this is. For bucket access control entries, this is always storage#bucketAccessControl. projectTeam: The project team associated with the entity, if any. role: The access permission for the entity. Can be READER, WRITER, or OWNER. selfLink: The link to this access-control entry. """ class ProjectTeamValue(_messages.Message): """The project team associated with the entity, if any. Fields: projectNumber: The project number. team: The team. Can be owners, editors, or viewers. """ projectNumber = _messages.StringField(1) team = _messages.StringField(2) bucket = _messages.StringField(1) domain = _messages.StringField(2) email = _messages.StringField(3) entity = _messages.StringField(4) entityId = _messages.StringField(5) etag = _messages.StringField(6) id = _messages.StringField(7) kind = _messages.StringField(8, default=u'storage#bucketAccessControl') projectTeam = _messages.MessageField('ProjectTeamValue', 9) role = _messages.StringField(10) selfLink = _messages.StringField(11) class BucketAccessControls(_messages.Message): """An access-control list. Fields: items: The list of items. kind: The kind of item this is. 
For lists of bucket access control entries, this is always storage#bucketAccessControls. """ items = _messages.MessageField('BucketAccessControl', 1, repeated=True) kind = _messages.StringField(2, default=u'storage#bucketAccessControls') class Buckets(_messages.Message): """A list of buckets. Fields: items: The list of items. kind: The kind of item this is. For lists of buckets, this is always storage#buckets. nextPageToken: The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results. """ items = _messages.MessageField('Bucket', 1, repeated=True) kind = _messages.StringField(2, default=u'storage#buckets') nextPageToken = _messages.StringField(3) class Channel(_messages.Message): """An notification channel used to watch for resource changes. Messages: ParamsValue: Additional parameters controlling delivery channel behavior. Optional.<|fim▁hole|> Fields: address: The address where notifications are delivered for this channel. expiration: Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional. id: A UUID or similar unique string that identifies this channel. kind: Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string "api#channel". params: Additional parameters controlling delivery channel behavior. Optional. payload: A Boolean value to indicate whether payload is wanted. Optional. resourceId: An opaque ID that identifies the resource being watched on this channel. Stable across different API versions. resourceUri: A version-specific identifier for the watched resource. token: An arbitrary string delivered to the target address with each notification delivered over this channel. Optional. type: The type of delivery mechanism used for this channel. """ @encoding.MapUnrecognizedFields('additionalProperties') class ParamsValue(_messages.Message): """Additional parameters controlling delivery channel behavior. Optional. Messages: AdditionalProperty: An additional property for a ParamsValue object. Fields: additionalProperties: Declares a new parameter by name. """ class AdditionalProperty(_messages.Message): """An additional property for a ParamsValue object. Fields: key: Name of the additional property. value: A string attribute. """ key = _messages.StringField(1) value = _messages.StringField(2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) address = _messages.StringField(1) expiration = _messages.IntegerField(2) id = _messages.StringField(3) kind = _messages.StringField(4, default=u'api#channel') params = _messages.MessageField('ParamsValue', 5) payload = _messages.BooleanField(6) resourceId = _messages.StringField(7) resourceUri = _messages.StringField(8) token = _messages.StringField(9) type = _messages.StringField(10) class ComposeRequest(_messages.Message): """A Compose request. Messages: SourceObjectsValueListEntry: A SourceObjectsValueListEntry object. Fields: destination: Properties of the resulting object. kind: The kind of item this is. sourceObjects: The list of source objects that will be concatenated into a single object. """ class SourceObjectsValueListEntry(_messages.Message): """A SourceObjectsValueListEntry object. Messages: ObjectPreconditionsValue: Conditions that must be met for this operation to execute. Fields: generation: The generation of this object to use as the source. name: The source object's name. 
The source object's bucket is implicitly the destination bucket. objectPreconditions: Conditions that must be met for this operation to execute. """ class ObjectPreconditionsValue(_messages.Message): """Conditions that must be met for this operation to execute. Fields: ifGenerationMatch: Only perform the composition if the generation of the source object that would be used matches this value. If this value and a generation are both specified, they must be the same value or the call will fail. """ ifGenerationMatch = _messages.IntegerField(1) generation = _messages.IntegerField(1) name = _messages.StringField(2) objectPreconditions = _messages.MessageField('ObjectPreconditionsValue', 3) destination = _messages.MessageField('Object', 1) kind = _messages.StringField(2, default=u'storage#composeRequest') sourceObjects = _messages.MessageField('SourceObjectsValueListEntry', 3, repeated=True) class Object(_messages.Message): """An object. Messages: CustomerEncryptionValue: Metadata of customer-supplied encryption key, if the object is encrypted by such a key. MetadataValue: User-provided metadata, in key/value pairs. OwnerValue: The owner of the object. This will always be the uploader of the object. Fields: acl: Access controls on the object. bucket: The name of the bucket containing this object. cacheControl: Cache-Control directive for the object data. componentCount: Number of underlying components that make up this object. Components are accumulated by compose operations. contentDisposition: Content-Disposition of the object data. contentEncoding: Content-Encoding of the object data. contentLanguage: Content-Language of the object data. contentType: Content-Type of the object data. crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices. customerEncryption: Metadata of customer-supplied encryption key, if the object is encrypted by such a key. etag: HTTP 1.1 Entity tag for the object. generation: The content generation of this object. Used for object versioning. id: The ID of the object. kind: The kind of item this is. For objects, this is always storage#object. md5Hash: MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see Hashes and ETags: Best Practices. mediaLink: Media download link. metadata: User-provided metadata, in key/value pairs. metageneration: The version of the metadata for this object at this generation. Used for preconditions and for detecting changes in metadata. A metageneration number is only meaningful in the context of a particular generation of a particular object. name: The name of this object. Required if not specified by URL parameter. owner: The owner of the object. This will always be the uploader of the object. selfLink: The link to this object. size: Content-Length of the data in bytes. storageClass: Storage class of the object. timeCreated: The creation time of the object in RFC 3339 format. timeDeleted: The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted. updated: The modification time of the object metadata in RFC 3339 format. """ class CustomerEncryptionValue(_messages.Message): """Metadata of customer-supplied encryption key, if the object is encrypted by such a key. Fields: encryptionAlgorithm: The encryption algorithm. keySha256: SHA256 hash value of the encryption key. 
""" encryptionAlgorithm = _messages.StringField(1) keySha256 = _messages.StringField(2) @encoding.MapUnrecognizedFields('additionalProperties') class MetadataValue(_messages.Message): """User-provided metadata, in key/value pairs. Messages: AdditionalProperty: An additional property for a MetadataValue object. Fields: additionalProperties: An individual metadata entry. """ class AdditionalProperty(_messages.Message): """An additional property for a MetadataValue object. Fields: key: Name of the additional property. value: A string attribute. """ key = _messages.StringField(1) value = _messages.StringField(2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) class OwnerValue(_messages.Message): """The owner of the object. This will always be the uploader of the object. Fields: entity: The entity, in the form user-userId. entityId: The ID for the entity. """ entity = _messages.StringField(1) entityId = _messages.StringField(2) acl = _messages.MessageField('ObjectAccessControl', 1, repeated=True) bucket = _messages.StringField(2) cacheControl = _messages.StringField(3) componentCount = _messages.IntegerField(4, variant=_messages.Variant.INT32) contentDisposition = _messages.StringField(5) contentEncoding = _messages.StringField(6) contentLanguage = _messages.StringField(7) contentType = _messages.StringField(8) crc32c = _messages.StringField(9) customerEncryption = _messages.MessageField('CustomerEncryptionValue', 10) etag = _messages.StringField(11) generation = _messages.IntegerField(12) id = _messages.StringField(13) kind = _messages.StringField(14, default=u'storage#object') md5Hash = _messages.StringField(15) mediaLink = _messages.StringField(16) metadata = _messages.MessageField('MetadataValue', 17) metageneration = _messages.IntegerField(18) name = _messages.StringField(19) owner = _messages.MessageField('OwnerValue', 20) selfLink = _messages.StringField(21) size = _messages.IntegerField(22, variant=_messages.Variant.UINT64) storageClass = _messages.StringField(23) timeCreated = _message_types.DateTimeField(24) timeDeleted = _message_types.DateTimeField(25) updated = _message_types.DateTimeField(26) class ObjectAccessControl(_messages.Message): """An access-control entry. Messages: ProjectTeamValue: The project team associated with the entity, if any. Fields: bucket: The name of the bucket. domain: The domain associated with the entity, if any. email: The email address associated with the entity, if any. entity: The entity holding the permission, in one of the following forms: - user-userId - user-email - group-groupId - group-email - domain- domain - project-team-projectId - allUsers - allAuthenticatedUsers Examples: - The user [email protected] would be [email protected]. - The group [email protected] would be group- [email protected]. - To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com. entityId: The ID for the entity, if any. etag: HTTP 1.1 Entity tag for the access-control entry. generation: The content generation of the object. id: The ID of the access-control entry. kind: The kind of item this is. For object access control entries, this is always storage#objectAccessControl. object: The name of the object. projectTeam: The project team associated with the entity, if any. role: The access permission for the entity. Can be READER or OWNER. selfLink: The link to this access-control entry. 
""" class ProjectTeamValue(_messages.Message): """The project team associated with the entity, if any. Fields: projectNumber: The project number. team: The team. Can be owners, editors, or viewers. """ projectNumber = _messages.StringField(1) team = _messages.StringField(2) bucket = _messages.StringField(1) domain = _messages.StringField(2) email = _messages.StringField(3) entity = _messages.StringField(4) entityId = _messages.StringField(5) etag = _messages.StringField(6) generation = _messages.IntegerField(7) id = _messages.StringField(8) kind = _messages.StringField(9, default=u'storage#objectAccessControl') object = _messages.StringField(10) projectTeam = _messages.MessageField('ProjectTeamValue', 11) role = _messages.StringField(12) selfLink = _messages.StringField(13) class ObjectAccessControls(_messages.Message): """An access-control list. Fields: items: The list of items. kind: The kind of item this is. For lists of object access control entries, this is always storage#objectAccessControls. """ items = _messages.MessageField('extra_types.JsonValue', 1, repeated=True) kind = _messages.StringField(2, default=u'storage#objectAccessControls') class Objects(_messages.Message): """A list of objects. Fields: items: The list of items. kind: The kind of item this is. For lists of objects, this is always storage#objects. nextPageToken: The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results. prefixes: The list of prefixes of objects matching-but-not-listed up to and including the requested delimiter. """ items = _messages.MessageField('Object', 1, repeated=True) kind = _messages.StringField(2, default=u'storage#objects') nextPageToken = _messages.StringField(3) prefixes = _messages.StringField(4, repeated=True) class RewriteResponse(_messages.Message): """A rewrite response. Fields: done: true if the copy is finished; otherwise, false if the copy is in progress. This property is always present in the response. kind: The kind of item this is. objectSize: The total size of the object being copied in bytes. This property is always present in the response. resource: A resource containing the metadata for the copied-to object. This property is present in the response only when copying completes. rewriteToken: A token to use in subsequent requests to continue copying data. This token is present in the response only when there is more data to copy. totalBytesRewritten: The total bytes written so far, which can be used to provide a waiting user with a progress indicator. This property is always present in the response. """ done = _messages.BooleanField(1) kind = _messages.StringField(2, default=u'storage#rewriteResponse') objectSize = _messages.IntegerField(3, variant=_messages.Variant.UINT64) resource = _messages.MessageField('Object', 4) rewriteToken = _messages.StringField(5) totalBytesRewritten = _messages.IntegerField(6, variant=_messages.Variant.UINT64) class StandardQueryParameters(_messages.Message): """Query parameters accepted by all methods. Enums: AltValueValuesEnum: Data format for the response. Fields: alt: Data format for the response. fields: Selector specifying which fields to include in a partial response. key: API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. oauth_token: OAuth 2.0 token for the current user. prettyPrint: Returns response with indentations and line breaks. 
quotaUser: Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided. trace: A tracing token of the form "token:<tokenid>" to include in api requests. userIp: IP address of the site where the request originates. Use this if you want to enforce per-user limits. """ class AltValueValuesEnum(_messages.Enum): """Data format for the response. Values: json: Responses with Content-Type of application/json """ json = 0 alt = _messages.EnumField('AltValueValuesEnum', 1, default=u'json') fields = _messages.StringField(2) key = _messages.StringField(3) oauth_token = _messages.StringField(4) prettyPrint = _messages.BooleanField(5, default=True) quotaUser = _messages.StringField(6) trace = _messages.StringField(7) userIp = _messages.StringField(8) class StorageBucketAccessControlsDeleteRequest(_messages.Message): """A StorageBucketAccessControlsDeleteRequest object. Fields: bucket: Name of a bucket. entity: The entity holding the permission. Can be user-userId, user- emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers. """ bucket = _messages.StringField(1, required=True) entity = _messages.StringField(2, required=True) class StorageBucketAccessControlsDeleteResponse(_messages.Message): """An empty StorageBucketAccessControlsDelete response.""" class StorageBucketAccessControlsGetRequest(_messages.Message): """A StorageBucketAccessControlsGetRequest object. Fields: bucket: Name of a bucket. entity: The entity holding the permission. Can be user-userId, user- emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers. """ bucket = _messages.StringField(1, required=True) entity = _messages.StringField(2, required=True) class StorageBucketAccessControlsListRequest(_messages.Message): """A StorageBucketAccessControlsListRequest object. Fields: bucket: Name of a bucket. """ bucket = _messages.StringField(1, required=True) class StorageBucketsDeleteRequest(_messages.Message): """A StorageBucketsDeleteRequest object. Fields: bucket: Name of a bucket. ifMetagenerationMatch: If set, only deletes the bucket if its metageneration matches this value. ifMetagenerationNotMatch: If set, only deletes the bucket if its metageneration does not match this value. """ bucket = _messages.StringField(1, required=True) ifMetagenerationMatch = _messages.IntegerField(2) ifMetagenerationNotMatch = _messages.IntegerField(3) class StorageBucketsDeleteResponse(_messages.Message): """An empty StorageBucketsDelete response.""" class StorageBucketsGetRequest(_messages.Message): """A StorageBucketsGetRequest object. Enums: ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl. Fields: bucket: Name of a bucket. ifMetagenerationMatch: Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value. ifMetagenerationNotMatch: Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value. projection: Set of properties to return. Defaults to noAcl. """ class ProjectionValueValuesEnum(_messages.Enum): """Set of properties to return. Defaults to noAcl. Values: full: Include all properties. noAcl: Omit acl and defaultObjectAcl properties. 
""" full = 0 noAcl = 1 bucket = _messages.StringField(1, required=True) ifMetagenerationMatch = _messages.IntegerField(2) ifMetagenerationNotMatch = _messages.IntegerField(3) projection = _messages.EnumField('ProjectionValueValuesEnum', 4) class StorageBucketsInsertRequest(_messages.Message): """A StorageBucketsInsertRequest object. Enums: PredefinedAclValueValuesEnum: Apply a predefined set of access controls to this bucket. PredefinedDefaultObjectAclValueValuesEnum: Apply a predefined set of default object access controls to this bucket. ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full. Fields: bucket: A Bucket resource to be passed as the request body. predefinedAcl: Apply a predefined set of access controls to this bucket. predefinedDefaultObjectAcl: Apply a predefined set of default object access controls to this bucket. project: A valid API project identifier. projection: Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full. """ class PredefinedAclValueValuesEnum(_messages.Enum): """Apply a predefined set of access controls to this bucket. Values: authenticatedRead: Project team owners get OWNER access, and allAuthenticatedUsers get READER access. private: Project team owners get OWNER access. projectPrivate: Project team members get access according to their roles. publicRead: Project team owners get OWNER access, and allUsers get READER access. publicReadWrite: Project team owners get OWNER access, and allUsers get WRITER access. """ authenticatedRead = 0 private = 1 projectPrivate = 2 publicRead = 3 publicReadWrite = 4 class PredefinedDefaultObjectAclValueValuesEnum(_messages.Enum): """Apply a predefined set of default object access controls to this bucket. Values: authenticatedRead: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. bucketOwnerFullControl: Object owner gets OWNER access, and project team owners get OWNER access. bucketOwnerRead: Object owner gets OWNER access, and project team owners get READER access. private: Object owner gets OWNER access. projectPrivate: Object owner gets OWNER access, and project team members get access according to their roles. publicRead: Object owner gets OWNER access, and allUsers get READER access. """ authenticatedRead = 0 bucketOwnerFullControl = 1 bucketOwnerRead = 2 private = 3 projectPrivate = 4 publicRead = 5 class ProjectionValueValuesEnum(_messages.Enum): """Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full. Values: full: Include all properties. noAcl: Omit acl and defaultObjectAcl properties. """ full = 0 noAcl = 1 bucket = _messages.MessageField('Bucket', 1) predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 2) predefinedDefaultObjectAcl = _messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 3) project = _messages.StringField(4, required=True) projection = _messages.EnumField('ProjectionValueValuesEnum', 5) class StorageBucketsListRequest(_messages.Message): """A StorageBucketsListRequest object. Enums: ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl. Fields: maxResults: Maximum number of buckets to return. pageToken: A previously-returned page token representing part of the larger set of results to view. 
prefix: Filter results to buckets whose names begin with this prefix. project: A valid API project identifier. projection: Set of properties to return. Defaults to noAcl. """ class ProjectionValueValuesEnum(_messages.Enum): """Set of properties to return. Defaults to noAcl. Values: full: Include all properties. noAcl: Omit acl and defaultObjectAcl properties. """ full = 0 noAcl = 1 maxResults = _messages.IntegerField(1, variant=_messages.Variant.UINT32) pageToken = _messages.StringField(2) prefix = _messages.StringField(3) project = _messages.StringField(4, required=True) projection = _messages.EnumField('ProjectionValueValuesEnum', 5) class StorageBucketsPatchRequest(_messages.Message): """A StorageBucketsPatchRequest object. Enums: PredefinedAclValueValuesEnum: Apply a predefined set of access controls to this bucket. PredefinedDefaultObjectAclValueValuesEnum: Apply a predefined set of default object access controls to this bucket. ProjectionValueValuesEnum: Set of properties to return. Defaults to full. Fields: bucket: Name of a bucket. bucketResource: A Bucket resource to be passed as the request body. ifMetagenerationMatch: Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value. ifMetagenerationNotMatch: Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value. predefinedAcl: Apply a predefined set of access controls to this bucket. predefinedDefaultObjectAcl: Apply a predefined set of default object access controls to this bucket. projection: Set of properties to return. Defaults to full. """ class PredefinedAclValueValuesEnum(_messages.Enum): """Apply a predefined set of access controls to this bucket. Values: authenticatedRead: Project team owners get OWNER access, and allAuthenticatedUsers get READER access. private: Project team owners get OWNER access. projectPrivate: Project team members get access according to their roles. publicRead: Project team owners get OWNER access, and allUsers get READER access. publicReadWrite: Project team owners get OWNER access, and allUsers get WRITER access. """ authenticatedRead = 0 private = 1 projectPrivate = 2 publicRead = 3 publicReadWrite = 4 class PredefinedDefaultObjectAclValueValuesEnum(_messages.Enum): """Apply a predefined set of default object access controls to this bucket. Values: authenticatedRead: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. bucketOwnerFullControl: Object owner gets OWNER access, and project team owners get OWNER access. bucketOwnerRead: Object owner gets OWNER access, and project team owners get READER access. private: Object owner gets OWNER access. projectPrivate: Object owner gets OWNER access, and project team members get access according to their roles. publicRead: Object owner gets OWNER access, and allUsers get READER access. """ authenticatedRead = 0 bucketOwnerFullControl = 1 bucketOwnerRead = 2 private = 3 projectPrivate = 4 publicRead = 5 class ProjectionValueValuesEnum(_messages.Enum): """Set of properties to return. Defaults to full. Values: full: Include all properties. noAcl: Omit acl and defaultObjectAcl properties. 
""" full = 0 noAcl = 1 bucket = _messages.StringField(1, required=True) bucketResource = _messages.MessageField('Bucket', 2) ifMetagenerationMatch = _messages.IntegerField(3) ifMetagenerationNotMatch = _messages.IntegerField(4) predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 5) predefinedDefaultObjectAcl = _messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 6) projection = _messages.EnumField('ProjectionValueValuesEnum', 7) class StorageBucketsUpdateRequest(_messages.Message): """A StorageBucketsUpdateRequest object. Enums: PredefinedAclValueValuesEnum: Apply a predefined set of access controls to this bucket. PredefinedDefaultObjectAclValueValuesEnum: Apply a predefined set of default object access controls to this bucket. ProjectionValueValuesEnum: Set of properties to return. Defaults to full. Fields: bucket: Name of a bucket. bucketResource: A Bucket resource to be passed as the request body. ifMetagenerationMatch: Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value. ifMetagenerationNotMatch: Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value. predefinedAcl: Apply a predefined set of access controls to this bucket. predefinedDefaultObjectAcl: Apply a predefined set of default object access controls to this bucket. projection: Set of properties to return. Defaults to full. """ class PredefinedAclValueValuesEnum(_messages.Enum): """Apply a predefined set of access controls to this bucket. Values: authenticatedRead: Project team owners get OWNER access, and allAuthenticatedUsers get READER access. private: Project team owners get OWNER access. projectPrivate: Project team members get access according to their roles. publicRead: Project team owners get OWNER access, and allUsers get READER access. publicReadWrite: Project team owners get OWNER access, and allUsers get WRITER access. """ authenticatedRead = 0 private = 1 projectPrivate = 2 publicRead = 3 publicReadWrite = 4 class PredefinedDefaultObjectAclValueValuesEnum(_messages.Enum): """Apply a predefined set of default object access controls to this bucket. Values: authenticatedRead: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. bucketOwnerFullControl: Object owner gets OWNER access, and project team owners get OWNER access. bucketOwnerRead: Object owner gets OWNER access, and project team owners get READER access. private: Object owner gets OWNER access. projectPrivate: Object owner gets OWNER access, and project team members get access according to their roles. publicRead: Object owner gets OWNER access, and allUsers get READER access. """ authenticatedRead = 0 bucketOwnerFullControl = 1 bucketOwnerRead = 2 private = 3 projectPrivate = 4 publicRead = 5 class ProjectionValueValuesEnum(_messages.Enum): """Set of properties to return. Defaults to full. Values: full: Include all properties. noAcl: Omit acl and defaultObjectAcl properties. 
""" full = 0 noAcl = 1 bucket = _messages.StringField(1, required=True) bucketResource = _messages.MessageField('Bucket', 2) ifMetagenerationMatch = _messages.IntegerField(3) ifMetagenerationNotMatch = _messages.IntegerField(4) predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 5) predefinedDefaultObjectAcl = _messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 6) projection = _messages.EnumField('ProjectionValueValuesEnum', 7) class StorageChannelsStopResponse(_messages.Message): """An empty StorageChannelsStop response.""" class StorageDefaultObjectAccessControlsDeleteRequest(_messages.Message): """A StorageDefaultObjectAccessControlsDeleteRequest object. Fields: bucket: Name of a bucket. entity: The entity holding the permission. Can be user-userId, user- emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers. """ bucket = _messages.StringField(1, required=True) entity = _messages.StringField(2, required=True) class StorageDefaultObjectAccessControlsDeleteResponse(_messages.Message): """An empty StorageDefaultObjectAccessControlsDelete response.""" class StorageDefaultObjectAccessControlsGetRequest(_messages.Message): """A StorageDefaultObjectAccessControlsGetRequest object. Fields: bucket: Name of a bucket. entity: The entity holding the permission. Can be user-userId, user- emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers. """ bucket = _messages.StringField(1, required=True) entity = _messages.StringField(2, required=True) class StorageDefaultObjectAccessControlsListRequest(_messages.Message): """A StorageDefaultObjectAccessControlsListRequest object. Fields: bucket: Name of a bucket. ifMetagenerationMatch: If present, only return default ACL listing if the bucket's current metageneration matches this value. ifMetagenerationNotMatch: If present, only return default ACL listing if the bucket's current metageneration does not match the given value. """ bucket = _messages.StringField(1, required=True) ifMetagenerationMatch = _messages.IntegerField(2) ifMetagenerationNotMatch = _messages.IntegerField(3) class StorageObjectAccessControlsDeleteRequest(_messages.Message): """A StorageObjectAccessControlsDeleteRequest object. Fields: bucket: Name of a bucket. entity: The entity holding the permission. Can be user-userId, user- emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers. generation: If present, selects a specific revision of this object (as opposed to the latest version, the default). object: Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts. """ bucket = _messages.StringField(1, required=True) entity = _messages.StringField(2, required=True) generation = _messages.IntegerField(3) object = _messages.StringField(4, required=True) class StorageObjectAccessControlsDeleteResponse(_messages.Message): """An empty StorageObjectAccessControlsDelete response.""" class StorageObjectAccessControlsGetRequest(_messages.Message): """A StorageObjectAccessControlsGetRequest object. Fields: bucket: Name of a bucket. entity: The entity holding the permission. Can be user-userId, user- emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers. generation: If present, selects a specific revision of this object (as opposed to the latest version, the default). object: Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts. 
""" bucket = _messages.StringField(1, required=True) entity = _messages.StringField(2, required=True) generation = _messages.IntegerField(3) object = _messages.StringField(4, required=True) class StorageObjectAccessControlsInsertRequest(_messages.Message): """A StorageObjectAccessControlsInsertRequest object. Fields: bucket: Name of a bucket. generation: If present, selects a specific revision of this object (as opposed to the latest version, the default). object: Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts. objectAccessControl: A ObjectAccessControl resource to be passed as the request body. """ bucket = _messages.StringField(1, required=True) generation = _messages.IntegerField(2) object = _messages.StringField(3, required=True) objectAccessControl = _messages.MessageField('ObjectAccessControl', 4) class StorageObjectAccessControlsListRequest(_messages.Message): """A StorageObjectAccessControlsListRequest object. Fields: bucket: Name of a bucket. generation: If present, selects a specific revision of this object (as opposed to the latest version, the default). object: Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts. """ bucket = _messages.StringField(1, required=True) generation = _messages.IntegerField(2) object = _messages.StringField(3, required=True) class StorageObjectAccessControlsPatchRequest(_messages.Message): """A StorageObjectAccessControlsPatchRequest object. Fields: bucket: Name of a bucket. entity: The entity holding the permission. Can be user-userId, user- emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers. generation: If present, selects a specific revision of this object (as opposed to the latest version, the default). object: Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts. objectAccessControl: A ObjectAccessControl resource to be passed as the request body. """ bucket = _messages.StringField(1, required=True) entity = _messages.StringField(2, required=True) generation = _messages.IntegerField(3) object = _messages.StringField(4, required=True) objectAccessControl = _messages.MessageField('ObjectAccessControl', 5) class StorageObjectAccessControlsUpdateRequest(_messages.Message): """A StorageObjectAccessControlsUpdateRequest object. Fields: bucket: Name of a bucket. entity: The entity holding the permission. Can be user-userId, user- emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers. generation: If present, selects a specific revision of this object (as opposed to the latest version, the default). object: Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts. objectAccessControl: A ObjectAccessControl resource to be passed as the request body. """ bucket = _messages.StringField(1, required=True) entity = _messages.StringField(2, required=True) generation = _messages.IntegerField(3) object = _messages.StringField(4, required=True) objectAccessControl = _messages.MessageField('ObjectAccessControl', 5) class StorageObjectsComposeRequest(_messages.Message): """A StorageObjectsComposeRequest object. Enums: DestinationPredefinedAclValueValuesEnum: Apply a predefined set of access controls to the destination object. Fields: composeRequest: A ComposeRequest resource to be passed as the request body. 
destinationBucket: Name of the bucket in which to store the new object. destinationObject: Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts. destinationPredefinedAcl: Apply a predefined set of access controls to the destination object. ifGenerationMatch: Makes the operation conditional on whether the object's current generation matches the given value. ifMetagenerationMatch: Makes the operation conditional on whether the object's current metageneration matches the given value. """ class DestinationPredefinedAclValueValuesEnum(_messages.Enum): """Apply a predefined set of access controls to the destination object. Values: authenticatedRead: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. bucketOwnerFullControl: Object owner gets OWNER access, and project team owners get OWNER access. bucketOwnerRead: Object owner gets OWNER access, and project team owners get READER access. private: Object owner gets OWNER access. projectPrivate: Object owner gets OWNER access, and project team members get access according to their roles. publicRead: Object owner gets OWNER access, and allUsers get READER access. """ authenticatedRead = 0 bucketOwnerFullControl = 1 bucketOwnerRead = 2 private = 3 projectPrivate = 4 publicRead = 5 composeRequest = _messages.MessageField('ComposeRequest', 1) destinationBucket = _messages.StringField(2, required=True) destinationObject = _messages.StringField(3, required=True) destinationPredefinedAcl = _messages.EnumField('DestinationPredefinedAclValueValuesEnum', 4) ifGenerationMatch = _messages.IntegerField(5) ifMetagenerationMatch = _messages.IntegerField(6) class StorageObjectsCopyRequest(_messages.Message): """A StorageObjectsCopyRequest object. Enums: DestinationPredefinedAclValueValuesEnum: Apply a predefined set of access controls to the destination object. ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. Fields: destinationBucket: Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts. destinationObject: Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. destinationPredefinedAcl: Apply a predefined set of access controls to the destination object. ifGenerationMatch: Makes the operation conditional on whether the destination object's current generation matches the given value. ifGenerationNotMatch: Makes the operation conditional on whether the destination object's current generation does not match the given value. ifMetagenerationMatch: Makes the operation conditional on whether the destination object's current metageneration matches the given value. ifMetagenerationNotMatch: Makes the operation conditional on whether the destination object's current metageneration does not match the given value. ifSourceGenerationMatch: Makes the operation conditional on whether the source object's generation matches the given value. ifSourceGenerationNotMatch: Makes the operation conditional on whether the source object's generation does not match the given value. ifSourceMetagenerationMatch: Makes the operation conditional on whether the source object's current metageneration matches the given value. 
ifSourceMetagenerationNotMatch: Makes the operation conditional on whether the source object's current metageneration does not match the given value. object: A Object resource to be passed as the request body. projection: Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. sourceBucket: Name of the bucket in which to find the source object. sourceGeneration: If present, selects a specific revision of the source object (as opposed to the latest version, the default). sourceObject: Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts. """ class DestinationPredefinedAclValueValuesEnum(_messages.Enum): """Apply a predefined set of access controls to the destination object. Values: authenticatedRead: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. bucketOwnerFullControl: Object owner gets OWNER access, and project team owners get OWNER access. bucketOwnerRead: Object owner gets OWNER access, and project team owners get READER access. private: Object owner gets OWNER access. projectPrivate: Object owner gets OWNER access, and project team members get access according to their roles. publicRead: Object owner gets OWNER access, and allUsers get READER access. """ authenticatedRead = 0 bucketOwnerFullControl = 1 bucketOwnerRead = 2 private = 3 projectPrivate = 4 publicRead = 5 class ProjectionValueValuesEnum(_messages.Enum): """Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. Values: full: Include all properties. noAcl: Omit the acl property. """ full = 0 noAcl = 1 destinationBucket = _messages.StringField(1, required=True) destinationObject = _messages.StringField(2, required=True) destinationPredefinedAcl = _messages.EnumField('DestinationPredefinedAclValueValuesEnum', 3) ifGenerationMatch = _messages.IntegerField(4) ifGenerationNotMatch = _messages.IntegerField(5) ifMetagenerationMatch = _messages.IntegerField(6) ifMetagenerationNotMatch = _messages.IntegerField(7) ifSourceGenerationMatch = _messages.IntegerField(8) ifSourceGenerationNotMatch = _messages.IntegerField(9) ifSourceMetagenerationMatch = _messages.IntegerField(10) ifSourceMetagenerationNotMatch = _messages.IntegerField(11) object = _messages.MessageField('Object', 12) projection = _messages.EnumField('ProjectionValueValuesEnum', 13) sourceBucket = _messages.StringField(14, required=True) sourceGeneration = _messages.IntegerField(15) sourceObject = _messages.StringField(16, required=True) class StorageObjectsDeleteRequest(_messages.Message): """A StorageObjectsDeleteRequest object. Fields: bucket: Name of the bucket in which the object resides. generation: If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default). ifGenerationMatch: Makes the operation conditional on whether the object's current generation matches the given value. ifGenerationNotMatch: Makes the operation conditional on whether the object's current generation does not match the given value. ifMetagenerationMatch: Makes the operation conditional on whether the object's current metageneration matches the given value. ifMetagenerationNotMatch: Makes the operation conditional on whether the object's current metageneration does not match the given value. object: Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts. """ bucket = _messages.StringField(1, required=True) generation = _messages.IntegerField(2) ifGenerationMatch = _messages.IntegerField(3) ifGenerationNotMatch = _messages.IntegerField(4) ifMetagenerationMatch = _messages.IntegerField(5) ifMetagenerationNotMatch = _messages.IntegerField(6) object = _messages.StringField(7, required=True) class StorageObjectsDeleteResponse(_messages.Message): """An empty StorageObjectsDelete response.""" class StorageObjectsGetRequest(_messages.Message): """A StorageObjectsGetRequest object. Enums: ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl. Fields: bucket: Name of the bucket in which the object resides. generation: If present, selects a specific revision of this object (as opposed to the latest version, the default). ifGenerationMatch: Makes the operation conditional on whether the object's generation matches the given value. ifGenerationNotMatch: Makes the operation conditional on whether the object's generation does not match the given value. ifMetagenerationMatch: Makes the operation conditional on whether the object's current metageneration matches the given value. ifMetagenerationNotMatch: Makes the operation conditional on whether the object's current metageneration does not match the given value. object: Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts. projection: Set of properties to return. Defaults to noAcl. """ class ProjectionValueValuesEnum(_messages.Enum): """Set of properties to return. Defaults to noAcl. Values: full: Include all properties. noAcl: Omit the acl property. """ full = 0 noAcl = 1 bucket = _messages.StringField(1, required=True) generation = _messages.IntegerField(2) ifGenerationMatch = _messages.IntegerField(3) ifGenerationNotMatch = _messages.IntegerField(4) ifMetagenerationMatch = _messages.IntegerField(5) ifMetagenerationNotMatch = _messages.IntegerField(6) object = _messages.StringField(7, required=True) projection = _messages.EnumField('ProjectionValueValuesEnum', 8) class StorageObjectsInsertRequest(_messages.Message): """A StorageObjectsInsertRequest object. Enums: PredefinedAclValueValuesEnum: Apply a predefined set of access controls to this object. ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. Fields: bucket: Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any. contentEncoding: If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded. ifGenerationMatch: Makes the operation conditional on whether the object's current generation matches the given value. ifGenerationNotMatch: Makes the operation conditional on whether the object's current generation does not match the given value. ifMetagenerationMatch: Makes the operation conditional on whether the object's current metageneration matches the given value. ifMetagenerationNotMatch: Makes the operation conditional on whether the object's current metageneration does not match the given value. name: Name of the object. Required when the object metadata is not otherwise provided. 
Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts. object: A Object resource to be passed as the request body. predefinedAcl: Apply a predefined set of access controls to this object. projection: Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. """ class PredefinedAclValueValuesEnum(_messages.Enum): """Apply a predefined set of access controls to this object. Values: authenticatedRead: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. bucketOwnerFullControl: Object owner gets OWNER access, and project team owners get OWNER access. bucketOwnerRead: Object owner gets OWNER access, and project team owners get READER access. private: Object owner gets OWNER access. projectPrivate: Object owner gets OWNER access, and project team members get access according to their roles. publicRead: Object owner gets OWNER access, and allUsers get READER access. """ authenticatedRead = 0 bucketOwnerFullControl = 1 bucketOwnerRead = 2 private = 3 projectPrivate = 4 publicRead = 5 class ProjectionValueValuesEnum(_messages.Enum): """Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. Values: full: Include all properties. noAcl: Omit the acl property. """ full = 0 noAcl = 1 bucket = _messages.StringField(1, required=True) contentEncoding = _messages.StringField(2) ifGenerationMatch = _messages.IntegerField(3) ifGenerationNotMatch = _messages.IntegerField(4) ifMetagenerationMatch = _messages.IntegerField(5) ifMetagenerationNotMatch = _messages.IntegerField(6) name = _messages.StringField(7) object = _messages.MessageField('Object', 8) predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 9) projection = _messages.EnumField('ProjectionValueValuesEnum', 10) class StorageObjectsListRequest(_messages.Message): """A StorageObjectsListRequest object. Enums: ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl. Fields: bucket: Name of the bucket in which to look for objects. delimiter: Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted. maxResults: Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items. pageToken: A previously-returned page token representing part of the larger set of results to view. prefix: Filter results to objects whose names begin with this prefix. projection: Set of properties to return. Defaults to noAcl. versions: If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning. """ class ProjectionValueValuesEnum(_messages.Enum): """Set of properties to return. Defaults to noAcl. Values: full: Include all properties. noAcl: Omit the acl property. 
""" full = 0 noAcl = 1 bucket = _messages.StringField(1, required=True) delimiter = _messages.StringField(2) maxResults = _messages.IntegerField(3, variant=_messages.Variant.UINT32) pageToken = _messages.StringField(4) prefix = _messages.StringField(5) projection = _messages.EnumField('ProjectionValueValuesEnum', 6) versions = _messages.BooleanField(7) class StorageObjectsPatchRequest(_messages.Message): """A StorageObjectsPatchRequest object. Enums: PredefinedAclValueValuesEnum: Apply a predefined set of access controls to this object. ProjectionValueValuesEnum: Set of properties to return. Defaults to full. Fields: bucket: Name of the bucket in which the object resides. generation: If present, selects a specific revision of this object (as opposed to the latest version, the default). ifGenerationMatch: Makes the operation conditional on whether the object's current generation matches the given value. ifGenerationNotMatch: Makes the operation conditional on whether the object's current generation does not match the given value. ifMetagenerationMatch: Makes the operation conditional on whether the object's current metageneration matches the given value. ifMetagenerationNotMatch: Makes the operation conditional on whether the object's current metageneration does not match the given value. object: Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts. objectResource: A Object resource to be passed as the request body. predefinedAcl: Apply a predefined set of access controls to this object. projection: Set of properties to return. Defaults to full. """ class PredefinedAclValueValuesEnum(_messages.Enum): """Apply a predefined set of access controls to this object. Values: authenticatedRead: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. bucketOwnerFullControl: Object owner gets OWNER access, and project team owners get OWNER access. bucketOwnerRead: Object owner gets OWNER access, and project team owners get READER access. private: Object owner gets OWNER access. projectPrivate: Object owner gets OWNER access, and project team members get access according to their roles. publicRead: Object owner gets OWNER access, and allUsers get READER access. """ authenticatedRead = 0 bucketOwnerFullControl = 1 bucketOwnerRead = 2 private = 3 projectPrivate = 4 publicRead = 5 class ProjectionValueValuesEnum(_messages.Enum): """Set of properties to return. Defaults to full. Values: full: Include all properties. noAcl: Omit the acl property. """ full = 0 noAcl = 1 bucket = _messages.StringField(1, required=True) generation = _messages.IntegerField(2) ifGenerationMatch = _messages.IntegerField(3) ifGenerationNotMatch = _messages.IntegerField(4) ifMetagenerationMatch = _messages.IntegerField(5) ifMetagenerationNotMatch = _messages.IntegerField(6) object = _messages.StringField(7, required=True) objectResource = _messages.MessageField('Object', 8) predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 9) projection = _messages.EnumField('ProjectionValueValuesEnum', 10) class StorageObjectsRewriteRequest(_messages.Message): """A StorageObjectsRewriteRequest object. Enums: DestinationPredefinedAclValueValuesEnum: Apply a predefined set of access controls to the destination object. ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. 
Fields: destinationBucket: Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any. destinationObject: Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts. destinationPredefinedAcl: Apply a predefined set of access controls to the destination object. ifGenerationMatch: Makes the operation conditional on whether the destination object's current generation matches the given value. ifGenerationNotMatch: Makes the operation conditional on whether the destination object's current generation does not match the given value. ifMetagenerationMatch: Makes the operation conditional on whether the destination object's current metageneration matches the given value. ifMetagenerationNotMatch: Makes the operation conditional on whether the destination object's current metageneration does not match the given value. ifSourceGenerationMatch: Makes the operation conditional on whether the source object's generation matches the given value. ifSourceGenerationNotMatch: Makes the operation conditional on whether the source object's generation does not match the given value. ifSourceMetagenerationMatch: Makes the operation conditional on whether the source object's current metageneration matches the given value. ifSourceMetagenerationNotMatch: Makes the operation conditional on whether the source object's current metageneration does not match the given value. maxBytesRewrittenPerCall: The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid. object: A Object resource to be passed as the request body. projection: Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. rewriteToken: Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request. sourceBucket: Name of the bucket in which to find the source object. sourceGeneration: If present, selects a specific revision of the source object (as opposed to the latest version, the default). sourceObject: Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts. """ class DestinationPredefinedAclValueValuesEnum(_messages.Enum): """Apply a predefined set of access controls to the destination object. Values: authenticatedRead: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. bucketOwnerFullControl: Object owner gets OWNER access, and project team owners get OWNER access. bucketOwnerRead: Object owner gets OWNER access, and project team owners get READER access. private: Object owner gets OWNER access. projectPrivate: Object owner gets OWNER access, and project team members get access according to their roles. 
publicRead: Object owner gets OWNER access, and allUsers get READER access. """ authenticatedRead = 0 bucketOwnerFullControl = 1 bucketOwnerRead = 2 private = 3 projectPrivate = 4 publicRead = 5 class ProjectionValueValuesEnum(_messages.Enum): """Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. Values: full: Include all properties. noAcl: Omit the acl property. """ full = 0 noAcl = 1 destinationBucket = _messages.StringField(1, required=True) destinationObject = _messages.StringField(2, required=True) destinationPredefinedAcl = _messages.EnumField('DestinationPredefinedAclValueValuesEnum', 3) ifGenerationMatch = _messages.IntegerField(4) ifGenerationNotMatch = _messages.IntegerField(5) ifMetagenerationMatch = _messages.IntegerField(6) ifMetagenerationNotMatch = _messages.IntegerField(7) ifSourceGenerationMatch = _messages.IntegerField(8) ifSourceGenerationNotMatch = _messages.IntegerField(9) ifSourceMetagenerationMatch = _messages.IntegerField(10) ifSourceMetagenerationNotMatch = _messages.IntegerField(11) maxBytesRewrittenPerCall = _messages.IntegerField(12) object = _messages.MessageField('Object', 13) projection = _messages.EnumField('ProjectionValueValuesEnum', 14) rewriteToken = _messages.StringField(15) sourceBucket = _messages.StringField(16, required=True) sourceGeneration = _messages.IntegerField(17) sourceObject = _messages.StringField(18, required=True) class StorageObjectsUpdateRequest(_messages.Message): """A StorageObjectsUpdateRequest object. Enums: PredefinedAclValueValuesEnum: Apply a predefined set of access controls to this object. ProjectionValueValuesEnum: Set of properties to return. Defaults to full. Fields: bucket: Name of the bucket in which the object resides. generation: If present, selects a specific revision of this object (as opposed to the latest version, the default). ifGenerationMatch: Makes the operation conditional on whether the object's current generation matches the given value. ifGenerationNotMatch: Makes the operation conditional on whether the object's current generation does not match the given value. ifMetagenerationMatch: Makes the operation conditional on whether the object's current metageneration matches the given value. ifMetagenerationNotMatch: Makes the operation conditional on whether the object's current metageneration does not match the given value. object: Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts. objectResource: A Object resource to be passed as the request body. predefinedAcl: Apply a predefined set of access controls to this object. projection: Set of properties to return. Defaults to full. """ class PredefinedAclValueValuesEnum(_messages.Enum): """Apply a predefined set of access controls to this object. Values: authenticatedRead: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. bucketOwnerFullControl: Object owner gets OWNER access, and project team owners get OWNER access. bucketOwnerRead: Object owner gets OWNER access, and project team owners get READER access. private: Object owner gets OWNER access. projectPrivate: Object owner gets OWNER access, and project team members get access according to their roles. publicRead: Object owner gets OWNER access, and allUsers get READER access. 
""" authenticatedRead = 0 bucketOwnerFullControl = 1 bucketOwnerRead = 2 private = 3 projectPrivate = 4 publicRead = 5 class ProjectionValueValuesEnum(_messages.Enum): """Set of properties to return. Defaults to full. Values: full: Include all properties. noAcl: Omit the acl property. """ full = 0 noAcl = 1 bucket = _messages.StringField(1, required=True) generation = _messages.IntegerField(2) ifGenerationMatch = _messages.IntegerField(3) ifGenerationNotMatch = _messages.IntegerField(4) ifMetagenerationMatch = _messages.IntegerField(5) ifMetagenerationNotMatch = _messages.IntegerField(6) object = _messages.StringField(7, required=True) objectResource = _messages.MessageField('Object', 8) predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 9) projection = _messages.EnumField('ProjectionValueValuesEnum', 10) class StorageObjectsWatchAllRequest(_messages.Message): """A StorageObjectsWatchAllRequest object. Enums: ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl. Fields: bucket: Name of the bucket in which to look for objects. channel: A Channel resource to be passed as the request body. delimiter: Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted. maxResults: Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items. pageToken: A previously-returned page token representing part of the larger set of results to view. prefix: Filter results to objects whose names begin with this prefix. projection: Set of properties to return. Defaults to noAcl. versions: If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning. """ class ProjectionValueValuesEnum(_messages.Enum): """Set of properties to return. Defaults to noAcl. Values: full: Include all properties. noAcl: Omit the acl property. """ full = 0 noAcl = 1 bucket = _messages.StringField(1, required=True) channel = _messages.MessageField('Channel', 2) delimiter = _messages.StringField(3) maxResults = _messages.IntegerField(4, variant=_messages.Variant.UINT32) pageToken = _messages.StringField(5) prefix = _messages.StringField(6) projection = _messages.EnumField('ProjectionValueValuesEnum', 7) versions = _messages.BooleanField(8)<|fim▁end|>
<|file_name|>port1.rs<|end_file_name|><|fim▁begin|>#[doc = r"Register block"] #[repr(C)] pub struct RegisterBlock { #[doc = "0x00 - Port 1 Output Register"] pub out: crate::Reg<out::OUT_SPEC>, #[doc = "0x04 - Port 1 Output Modification Register"]<|fim▁hole|> #[doc = "0x10 - Port 1 Input/Output Control Register 0"] pub iocr0: crate::Reg<iocr0::IOCR0_SPEC>, #[doc = "0x14 - Port 1 Input/Output Control Register 4"] pub iocr4: crate::Reg<iocr4::IOCR4_SPEC>, #[doc = "0x18 - Port 1 Input/Output Control Register 8"] pub iocr8: crate::Reg<iocr8::IOCR8_SPEC>, #[doc = "0x1c - Port 1 Input/Output Control Register 12"] pub iocr12: crate::Reg<iocr12::IOCR12_SPEC>, _reserved6: [u8; 0x04], #[doc = "0x24 - Port 1 Input Register"] pub in_: crate::Reg<in_::IN_SPEC>, _reserved7: [u8; 0x18], #[doc = "0x40 - Port 1 Pad Driver Mode 0 Register"] pub pdr0: crate::Reg<pdr0::PDR0_SPEC>, #[doc = "0x44 - Port 1 Pad Driver Mode 1 Register"] pub pdr1: crate::Reg<pdr1::PDR1_SPEC>, _reserved9: [u8; 0x18], #[doc = "0x60 - Port 1 Pin Function Decision Control Register"] pub pdisc: crate::Reg<pdisc::PDISC_SPEC>, _reserved10: [u8; 0x0c], #[doc = "0x70 - Port 1 Pin Power Save Register"] pub pps: crate::Reg<pps::PPS_SPEC>, #[doc = "0x74 - Port 1 Pin Hardware Select Register"] pub hwsel: crate::Reg<hwsel::HWSEL_SPEC>, } #[doc = "OUT register accessor: an alias for `Reg<OUT_SPEC>`"] pub type OUT = crate::Reg<out::OUT_SPEC>; #[doc = "Port 1 Output Register"] pub mod out; #[doc = "OMR register accessor: an alias for `Reg<OMR_SPEC>`"] pub type OMR = crate::Reg<omr::OMR_SPEC>; #[doc = "Port 1 Output Modification Register"] pub mod omr; #[doc = "IOCR0 register accessor: an alias for `Reg<IOCR0_SPEC>`"] pub type IOCR0 = crate::Reg<iocr0::IOCR0_SPEC>; #[doc = "Port 1 Input/Output Control Register 0"] pub mod iocr0; #[doc = "IOCR4 register accessor: an alias for `Reg<IOCR4_SPEC>`"] pub type IOCR4 = crate::Reg<iocr4::IOCR4_SPEC>; #[doc = "Port 1 Input/Output Control Register 4"] pub mod iocr4; #[doc = "IOCR8 register accessor: an alias for `Reg<IOCR8_SPEC>`"] pub type IOCR8 = crate::Reg<iocr8::IOCR8_SPEC>; #[doc = "Port 1 Input/Output Control Register 8"] pub mod iocr8; #[doc = "IOCR12 register accessor: an alias for `Reg<IOCR12_SPEC>`"] pub type IOCR12 = crate::Reg<iocr12::IOCR12_SPEC>; #[doc = "Port 1 Input/Output Control Register 12"] pub mod iocr12; #[doc = "IN register accessor: an alias for `Reg<IN_SPEC>`"] pub type IN = crate::Reg<in_::IN_SPEC>; #[doc = "Port 1 Input Register"] pub mod in_; #[doc = "PDR0 register accessor: an alias for `Reg<PDR0_SPEC>`"] pub type PDR0 = crate::Reg<pdr0::PDR0_SPEC>; #[doc = "Port 1 Pad Driver Mode 0 Register"] pub mod pdr0; #[doc = "PDR1 register accessor: an alias for `Reg<PDR1_SPEC>`"] pub type PDR1 = crate::Reg<pdr1::PDR1_SPEC>; #[doc = "Port 1 Pad Driver Mode 1 Register"] pub mod pdr1; #[doc = "PDISC register accessor: an alias for `Reg<PDISC_SPEC>`"] pub type PDISC = crate::Reg<pdisc::PDISC_SPEC>; #[doc = "Port 1 Pin Function Decision Control Register"] pub mod pdisc; #[doc = "PPS register accessor: an alias for `Reg<PPS_SPEC>`"] pub type PPS = crate::Reg<pps::PPS_SPEC>; #[doc = "Port 1 Pin Power Save Register"] pub mod pps; #[doc = "HWSEL register accessor: an alias for `Reg<HWSEL_SPEC>`"] pub type HWSEL = crate::Reg<hwsel::HWSEL_SPEC>; #[doc = "Port 1 Pin Hardware Select Register"] pub mod hwsel;<|fim▁end|>
pub omr: crate::Reg<omr::OMR_SPEC>, _reserved2: [u8; 0x08],
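Editor's note: the `_reserved` byte arrays in the register block above exist only to keep each field at its documented byte offset; each register is one 32-bit word. A quick Python check of the padding arithmetic, with offsets copied from the doc comments above (illustrative only):

offsets = {"out": 0x00, "omr": 0x04, "iocr0": 0x10, "iocr4": 0x14,
           "iocr8": 0x18, "iocr12": 0x1C, "in_": 0x24, "pdr0": 0x40,
           "pdr1": 0x44, "pdisc": 0x60, "pps": 0x70, "hwsel": 0x74}

# OMR is a 4-byte register ending at 0x08; IOCR0 starts at 0x10,
# so the struct needs an 8-byte filler between them.
gap = offsets["iocr0"] - (offsets["omr"] + 4)
assert gap == 0x08  # matches `_reserved2: [u8; 0x08]` in the struct above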
<|file_name|>scanner.py<|end_file_name|><|fim▁begin|>""" Copyright 2014 Quentin Kaiser Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from nessusobject import NessusObject class Scanner(NessusObject): """ A Nessus Scan Template instance. Attributes: _Google Python Style Guide: http://google-styleguide.googlecode.com/svn/trunk/pyguide.html """ def __init__(self, server): """Constructor""" super(Scanner, self).__init__(server) self._id = None self._uuid = None self._name = None self._type = None self._status = None self._scan_count = 0 self._engine_version = None self._platform = None self._loaded_plugin_set = None self._registration_code = None self._owner = None @property def id(self): return self._id @id.setter def id(self, value): self._id = int(value) @property def uuid(self): return self._uuid @uuid.setter def uuid(self, value): self._uuid = str(value) @property def name(self): return self._name @name.setter def name(self, value): self._name = str(value) @property def type(self): return self._type @type.setter def type(self, value): self._type = str(value) @property def status(self): return self._status @status.setter def status(self, value): self._status = str(value) @property def scan_count(self): return self._scan_count @scan_count.setter def scan_count(self, value): self._scan_count = int(value) @property def engine_version(self): return self._engine_version @engine_version.setter def engine_version(self, value): self._engine_version = str(value) @property def platform(self): return self._platform @platform.setter def platform(self, value): self._platform = str(value) @property def loaded_plugin_set(self): return self._loaded_plugin_set @loaded_plugin_set.setter def loaded_plugin_set(self, value): self._loaded_plugin_set = str(value) @property def registration_code(self): return self._registration_code @registration_code.setter def registration_code(self, value): self._registration_code = str(value) <|fim▁hole|> def owner(self): return self._owner @owner.setter def owner(self, value): self._owner = str(value)<|fim▁end|>
@property
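Editor's note: every attribute on `Scanner` follows the same coerce-on-set pattern (the setter forces `int()` or `str()`, the getter returns the stored value as-is). A short usage sketch; the stub server object is hypothetical, and this assumes `NessusObject.__init__` only stores the handle:

class _FakeServer:  # stand-in; the real constructor expects a server connection
    pass

scanner = Scanner(_FakeServer())
scanner.id = "42"                # string input is coerced by the setter
scanner.scan_count = "7"         # likewise coerced to int
print(scanner.id + 1)            # -> 43, already an int
print(type(scanner.scan_count))  # -> <class 'int'>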
<|file_name|>ex_402.py<|end_file_name|><|fim▁begin|>class Solution(object): def removeKdigits(self, num, k): """ :type num: str :type k: int :rtype: str<|fim▁hole|> while k and stack and stack[-1] > c: stack.pop() k -= 1 stack.append(c) return ''.join(stack[:length]).lstrip('0') or '0'<|fim▁end|>
""" stack = [] length = len(num) - k for c in num:
<|file_name|>viewport.rs<|end_file_name|><|fim▁begin|>//! Provides a utility method for calculating native viewport size when the window is resized. use ppu::{SCREEN_WIDTH, SCREEN_HEIGHT}; /// A simple rectangle pub struct Viewport { pub x: u32, pub y: u32, pub w: u32, pub h: u32, } impl Viewport { /// Calculates a viewport to use for a window of the given size. /// /// The returned viewport will have the native SNES aspect ratio and still fill the window on at /// least one axis. Basically, this calculates the black bars to apply to the window to make the /// center have the native SNES ratio. pub fn for_window_size(w: u32, h: u32) -> Self { // FIXME Not sure if floats are a good idea here let w = w as f32; let h = h as f32; const NATIVE_RATIO: f32 = SCREEN_WIDTH as f32 / SCREEN_HEIGHT as f32; let ratio = w / h; let view_w; let view_h; if ratio > NATIVE_RATIO { // Too wide view_h = h; view_w = h * NATIVE_RATIO; } else { // Too high view_w = w; view_h = w / NATIVE_RATIO; } let border_x = (w - view_w).round() as u32 / 2; let border_y = (h - view_h).round() as u32 / 2; let view_w = view_w.round() as u32; let view_h = view_h.round() as u32;<|fim▁hole|> Viewport { x: border_x as u32, y: border_y as u32, w: view_w, h: view_h, } } }<|fim▁end|>
<|file_name|>eagleplatform.py<|end_file_name|><|fim▁begin|># coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, ) class EaglePlatformIE(InfoExtractor): _VALID_URL = r'''(?x) (?: eagleplatform:(?P<custom_host>[^/]+):| https?://(?P<host>.+?\.media\.eagleplatform\.com)/index/player\?.*\brecord_id= ) (?P<id>\d+) ''' _TESTS = [{ # http://lenta.ru/news/2015/03/06/navalny/ 'url': 'http://lentaru.media.eagleplatform.com/index/player?player=new&record_id=227304&player_template_id=5201', 'md5': '0b7994faa2bd5c0f69a3db6db28d078d', 'info_dict': { 'id': '227304', 'ext': 'mp4', 'title': 'Навальный вышел на свободу', 'description': 'md5:d97861ac9ae77377f3f20eaf9d04b4f5', 'thumbnail': 're:^https?://.*\.jpg$', 'duration': 87, 'view_count': int, 'age_limit': 0, }, }, { # http://muz-tv.ru/play/7129/ # http://media.clipyou.ru/index/player?record_id=12820&width=730&height=415&autoplay=true 'url': 'eagleplatform:media.clipyou.ru:12820', 'md5': '6c2ebeab03b739597ce8d86339d5a905', 'info_dict': { 'id': '12820', 'ext': 'mp4', 'title': "'O Sole Mio", 'thumbnail': 're:^https?://.*\.jpg$', 'duration': 216, 'view_count': int, }, }] def _handle_error(self, response): status = int_or_none(response.get('status', 200)) if status != 200: raise ExtractorError(' '.join(response['errors']), expected=True) def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata'): response = super(EaglePlatformIE, self)._download_json(url_or_request, video_id, note) self._handle_error(response) return response def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) host, video_id = mobj.group('custom_host') or mobj.group('host'), mobj.group('id') player_data = self._download_json( 'http://%s/api/player_data?id=%s' % (host, video_id), video_id) media = player_data['data']['playlist']['viewports'][0]['medialist'][0] title = media['title'] description = media.get('description') thumbnail = media.get('snapshot') duration = int_or_none(media.get('duration')) view_count = int_or_none(media.get('views')) age_restriction = media.get('age_restriction') age_limit = None if age_restriction: age_limit = 0 if age_restriction == 'allow_all' else 18 m3u8_data = self._download_json( media['sources']['secure_m3u8']['auto'],<|fim▁hole|> formats = self._extract_m3u8_formats( m3u8_data['data'][0], video_id, 'mp4', entry_protocol='m3u8_native') self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'view_count': view_count, 'age_limit': age_limit, 'formats': formats, }<|fim▁end|>
video_id, 'Downloading m3u8 JSON')
<|file_name|>karma.hpp<|end_file_name|><|fim▁begin|>/*============================================================================= Copyright (c) 2001-2009 Joel de Guzman Copyright (c) 2001-2009 Hartmut Kaiser http://spirit.sourceforge.net/ Distributed under the Boost Software License, Version 1.0. (See accompanying<|fim▁hole|>#define BOOST_SPIRIT_INCLUDE_KARMA_REPOSITORY #if defined(_MSC_VER) #pragma once #endif #include <boost/spirit/repository/home/karma.hpp> #endif<|fim▁end|>
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) =============================================================================*/ #ifndef BOOST_SPIRIT_INCLUDE_KARMA_REPOSITORY
<|file_name|>proc_cci_mta_acceptor.cpp<|end_file_name|><|fim▁begin|>//proc_ace_acceptor.cpp #include <proc_mta_ace_acceptor.h> using namespace proc_mta_ace; using std::ostringstream; //---------------------------------------------------------------------------------------------------------------------------- proc_acceptor::proc_acceptor ( concurrency_t concurrency ) : concurrency_ { concurrency } , the_thread_pool_ ( private_thread_pool_ ) , m_instance_data{ nullptr } { ACE_Trace _( ACE_TEXT( "proc_acceptor::proc_acceptor ( concurrency_t concurrency )" ) , __LINE__ ); } //---------------------------------------------------------------------------------------------------------------------------- proc_acceptor::proc_acceptor ( proc_thread_pool &thread_pool ) : concurrency_ { concurrency_t::thread_pool_ }, the_thread_pool_ ( thread_pool ) , m_instance_data{ nullptr } { ACE_Trace _( ACE_TEXT( "proc_acceptor::proc_acceptor ( proc_thread_pool &thread_pool)" ) , __LINE__ ); } //---------------------------------------------------------------------------------------------------------------------------- proc_acceptor::~proc_acceptor ( void ) { ACE_Trace _( ACE_TEXT( "proc_acceptor::~proc_acceptor ( void )" ) , __LINE__ ); if ( concurrency() == concurrency_t::thread_pool_ && thread_pool_is_private () ) { thread_pool ()->close (); } } //---------------------------------------------------------------------------------------------------------------------------- int proc_acceptor::open ( const ACE_INET_Addr &addr, ACE_Reactor *reactor, protocol_data_ptr instance_data , int pool_size ) { ACE_Trace _( ACE_TEXT( "int proc_acceptor::open" ) , __LINE__ ); data( instance_data ); if ( concurrency() == concurrency_t::thread_pool_ && thread_pool_is_private() ) { thread_pool()->start( pool_size ); } <|fim▁hole|> return inherited::open ( addr , reactor ); } //---------------------------------------------------------------------------------------------------------------------------- int proc_acceptor::close ( void ) { ACE_Trace _( ACE_TEXT( "proc_acceptor::close" ) , __LINE__ ); if ( concurrency() == concurrency_t::thread_pool_ && thread_pool_is_private () ) { thread_pool ()->stop(); } return inherited::close(); }<|fim▁end|>
<|file_name|>answer.tsx<|end_file_name|><|fim▁begin|>import React from "react"; import { getAnswersByQuestion } from "../../selectors/report-tree"; import { connect } from "react-redux"; import { AnswerProps } from "../../util/answer-utils"; import { getQuestionIcon } from "../../util/question-utils"; import MultipleChoiceAnswer from "../../components/portal-dashboard/multiple-choice-answer"; import OpenResponseAnswer from "../../components/dashboard/open-response-answer"; import { ImageAnswer } from "../../components/portal-dashboard/answers/image-answer"; import IframeAnswer from "../../components/report/iframe-answer"; import css from "../../../css/portal-dashboard/answer.less"; class Answer extends React.PureComponent<AnswerProps> { constructor(props: AnswerProps) { super(props); } render() { const { answer, question, student } = this.props; const atype = answer && answer.get("type"); const QuestionIcon = getQuestionIcon(question); const key = `student-${student ? student.get("id") : "NA"}-question-${question ? question.get("id") : "NA"}`; return ( <div className={css.answer} data-cy="student-answer" key={key}> {answer && (!question.get("required") || answer.get("submitted"))<|fim▁hole|> } </div> ); } renderNoAnswer = (icon: any) => { const QuestionIcon = icon; return ( <div className={css.noAnswer}> <QuestionIcon /> No response </div> ); } renderAnswer = (type: string) => { const { answer, question, responsive, studentName, trackEvent, answerOrientation } = this.props; const AnswerComponent: any = { "multiple_choice_answer": MultipleChoiceAnswer, "open_response_answer": OpenResponseAnswer, "image_question_answer": ImageAnswer, "external_link": IframeAnswer, "interactive_state": IframeAnswer, }; const AComponent = (answer && (!question.get("required") || answer.get("submitted"))) ? AnswerComponent[type] : undefined; if (!AComponent) { return ( <div>Answer type not supported.</div> ); } else { return ( <AComponent answer={answer} question={question} showFullAnswer={true} responsive={responsive} studentName={studentName} trackEvent={trackEvent} answerOrientation={answerOrientation} /> ); } } } function mapStateToProps(state: any, ownProps: any): Partial<AnswerProps> { return { answer: getAnswersByQuestion(state) .getIn([ownProps.question.get("id"), ownProps.student.get("id")]) }; } const mapDispatchToProps = (dispatch: any, ownProps: any): Partial<AnswerProps> => { return {}; }; export default connect(mapStateToProps, mapDispatchToProps)(Answer);<|fim▁end|>
? this.renderAnswer(atype) : this.renderNoAnswer(QuestionIcon)
<|file_name|>calc.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! [Calc expressions][calc]. //! //! [calc]: https://drafts.csswg.org/css-values/#calc-notation use cssparser::{Parser, Token, NumberOrPercentage, AngleOrNumber}; use parser::ParserContext; #[allow(unused_imports)] use std::ascii::AsciiExt; use std::fmt; use style_traits::{ToCss, ParseError, StyleParseErrorKind}; use style_traits::values::specified::AllowedNumericType; use values::{CSSInteger, CSSFloat}; use values::computed; use values::specified::{Angle, Time}; use values::specified::length::{AbsoluteLength, FontRelativeLength, NoCalcLength}; use values::specified::length::ViewportPercentageLength; /// A node inside a `Calc` expression's AST. #[derive(Clone, Debug)] pub enum CalcNode { /// `<length>` Length(NoCalcLength), /// `<angle>` Angle(Angle), /// `<time>` Time(Time), /// `<percentage>` Percentage(CSSFloat), /// `<number>` Number(CSSFloat), /// An expression of the form `x + y` Sum(Box<CalcNode>, Box<CalcNode>), /// An expression of the form `x - y` Sub(Box<CalcNode>, Box<CalcNode>), /// An expression of the form `x * y` Mul(Box<CalcNode>, Box<CalcNode>), /// An expression of the form `x / y` Div(Box<CalcNode>, Box<CalcNode>), } /// An expected unit we intend to parse within a `calc()` expression. /// /// This is used as a hint for the parser to fast-reject invalid expressions. #[derive(Clone, Copy, PartialEq)] pub enum CalcUnit { /// `<number>` Number, /// `<integer>` Integer, /// `<length>` Length, /// `<percentage>` Percentage, /// `<length> | <percentage>` LengthOrPercentage, /// `<angle>` Angle, /// `<time>` Time, } /// A struct to hold a simplified `<length>` or `<percentage>` expression. /// /// In some cases, e.g. DOMMatrix, we support calc(), but reject all the relative lengths, and /// to_computed_pixel_length_without_context() handles this case. Therefore, if you want to add a /// new field, please make sure this function work properly. #[derive(Clone, Copy, Debug, Default, MallocSizeOf, PartialEq)] #[allow(missing_docs)] pub struct CalcLengthOrPercentage { pub clamping_mode: AllowedNumericType, pub absolute: Option<AbsoluteLength>, pub vw: Option<CSSFloat>, pub vh: Option<CSSFloat>, pub vmin: Option<CSSFloat>, pub vmax: Option<CSSFloat>, pub em: Option<CSSFloat>, pub ex: Option<CSSFloat>, pub ch: Option<CSSFloat>, pub rem: Option<CSSFloat>, pub percentage: Option<computed::Percentage>, } impl ToCss for CalcLengthOrPercentage { /// <https://drafts.csswg.org/css-values/#calc-serialize> /// /// FIXME(emilio): Should this simplify away zeros? #[allow(unused_assignments)] fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { use num_traits::Zero; let mut first_value = true; macro_rules! first_value_check { ($val:expr) => { if !first_value { dest.write_str(if $val < Zero::zero() { " - " } else { " + " })?; } else if $val < Zero::zero() { dest.write_str("-")?; } first_value = false; }; } macro_rules! serialize { ( $( $val:ident ),* ) => { $( if let Some(val) = self.$val { first_value_check!(val); val.abs().to_css(dest)?; dest.write_str(stringify!($val))?; } )* }; } macro_rules! 
serialize_abs { ( $( $val:ident ),+ ) => { $( if let Some(AbsoluteLength::$val(v)) = self.absolute { first_value_check!(v); AbsoluteLength::$val(v.abs()).to_css(dest)?; } )+ }; } dest.write_str("calc(")?; // NOTE(emilio): Percentages first because of web-compat problems, see: // https://github.com/w3c/csswg-drafts/issues/1731 if let Some(val) = self.percentage { first_value_check!(val.0); val.abs().to_css(dest)?; } // NOTE(emilio): The order here it's very intentional, and alphabetic // per the spec linked above. serialize!(ch); serialize_abs!(Cm); serialize!(em, ex); serialize_abs!(In, Mm, Pc, Pt, Px, Q); serialize!(rem, vh, vmax, vmin, vw); dest.write_str(")") } } impl CalcNode { /// Tries to parse a single element in the expression, that is, a /// `<length>`, `<angle>`, `<time>`, `<percentage>`, according to /// `expected_unit`. /// /// May return a "complex" `CalcNode`, in the presence of a parenthesized /// expression, for example. fn parse_one<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, expected_unit: CalcUnit ) -> Result<Self, ParseError<'i>> { let location = input.current_source_location(); // FIXME: remove early returns when lifetimes are non-lexical match (input.next()?, expected_unit) { (&Token::Number { value, .. }, _) => return Ok(CalcNode::Number(value)), (&Token::Dimension { value, ref unit, .. }, CalcUnit::Length) | (&Token::Dimension { value, ref unit, .. }, CalcUnit::LengthOrPercentage) => { return NoCalcLength::parse_dimension(context, value, unit) .map(CalcNode::Length) .map_err(|()| location.new_custom_error(StyleParseErrorKind::UnspecifiedError)) } (&Token::Dimension { value, ref unit, .. }, CalcUnit::Angle) => { return Angle::parse_dimension(value, unit, /* from_calc = */ true) .map(CalcNode::Angle) .map_err(|()| location.new_custom_error(StyleParseErrorKind::UnspecifiedError)) } (&Token::Dimension { value, ref unit, .. }, CalcUnit::Time) => { return Time::parse_dimension(value, unit, /* from_calc = */ true) .map(CalcNode::Time) .map_err(|()| location.new_custom_error(StyleParseErrorKind::UnspecifiedError)) } (&Token::Percentage { unit_value, .. }, CalcUnit::LengthOrPercentage) | (&Token::Percentage { unit_value, .. }, CalcUnit::Percentage) => { return Ok(CalcNode::Percentage(unit_value)) } (&Token::ParenthesisBlock, _) => {} (&Token::Function(ref name), _) if name.eq_ignore_ascii_case("calc") => {} (t, _) => return Err(location.new_unexpected_token_error(t.clone())) } input.parse_nested_block(|i| { CalcNode::parse(context, i, expected_unit) }) } /// Parse a top-level `calc` expression, with all nested sub-expressions. /// /// This is in charge of parsing, for example, `2 + 3 * 100%`. 
fn parse<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, expected_unit: CalcUnit, ) -> Result<Self, ParseError<'i>> { let mut root = Self::parse_product(context, input, expected_unit)?; loop { let start = input.state(); match input.next_including_whitespace() { Ok(&Token::WhiteSpace(_)) => { if input.is_exhausted() {<|fim▁hole|> break; // allow trailing whitespace } // FIXME: remove clone() when lifetimes are non-lexical match input.next()?.clone() { Token::Delim('+') => { let rhs = Self::parse_product(context, input, expected_unit)?; let new_root = CalcNode::Sum(Box::new(root), Box::new(rhs)); root = new_root; } Token::Delim('-') => { let rhs = Self::parse_product(context, input, expected_unit)?; let new_root = CalcNode::Sub(Box::new(root), Box::new(rhs)); root = new_root; } t => return Err(input.new_unexpected_token_error(t)), } } _ => { input.reset(&start); break } } } Ok(root) } /// Parse a top-level `calc` expression, and all the products that may /// follow, and stop as soon as a non-product expression is found. /// /// This should parse correctly: /// /// * `2` /// * `2 * 2` /// * `2 * 2 + 2` (but will leave the `+ 2` unparsed). /// fn parse_product<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, expected_unit: CalcUnit) -> Result<Self, ParseError<'i>> { let mut root = Self::parse_one(context, input, expected_unit)?; loop { let start = input.state(); match input.next() { Ok(&Token::Delim('*')) => { let rhs = Self::parse_one(context, input, expected_unit)?; let new_root = CalcNode::Mul(Box::new(root), Box::new(rhs)); root = new_root; } // TODO(emilio): Figure out why the `Integer` check. Ok(&Token::Delim('/')) if expected_unit != CalcUnit::Integer => { let rhs = Self::parse_one(context, input, expected_unit)?; let new_root = CalcNode::Div(Box::new(root), Box::new(rhs)); root = new_root; } _ => { input.reset(&start); break } } } Ok(root) } /// Tries to simplify this expression into a `<length>` or `<percentage`> /// value. fn to_length_or_percentage(&self, clamping_mode: AllowedNumericType) -> Result<CalcLengthOrPercentage, ()> { let mut ret = CalcLengthOrPercentage { clamping_mode: clamping_mode, .. Default::default() }; self.add_length_or_percentage_to(&mut ret, 1.0)?; Ok(ret) } /// Tries to simplify this expression into a `<percentage>` value. fn to_percentage(&self) -> Result<CSSFloat, ()> { Ok(match *self { CalcNode::Percentage(percentage) => percentage, CalcNode::Sub(ref a, ref b) => { a.to_percentage()? - b.to_percentage()? } CalcNode::Sum(ref a, ref b) => { a.to_percentage()? + b.to_percentage()? } CalcNode::Mul(ref a, ref b) => { match a.to_percentage() { Ok(lhs) => { let rhs = b.to_number()?; lhs * rhs } Err(..) => { let lhs = a.to_number()?; let rhs = b.to_percentage()?; lhs * rhs } } } CalcNode::Div(ref a, ref b) => { let lhs = a.to_percentage()?; let rhs = b.to_number()?; if rhs == 0. { return Err(()) } lhs / rhs } CalcNode::Number(..) | CalcNode::Length(..) | CalcNode::Angle(..) | CalcNode::Time(..) => return Err(()), }) } /// Puts this `<length>` or `<percentage>` into `ret`, or error. /// /// `factor` is the sign or multiplicative factor to account for the sign /// (this allows adding and substracting into the return value). 
fn add_length_or_percentage_to( &self, ret: &mut CalcLengthOrPercentage, factor: CSSFloat) -> Result<(), ()> { match *self { CalcNode::Percentage(pct) => { ret.percentage = Some(computed::Percentage( ret.percentage.map_or(0., |p| p.0) + pct * factor, )); } CalcNode::Length(ref l) => { match *l { NoCalcLength::Absolute(abs) => { ret.absolute = Some( match ret.absolute { Some(value) => value + abs * factor, None => abs * factor, } ); } NoCalcLength::FontRelative(rel) => { match rel { FontRelativeLength::Em(em) => { ret.em = Some(ret.em.unwrap_or(0.) + em * factor); } FontRelativeLength::Ex(ex) => { ret.ex = Some(ret.ex.unwrap_or(0.) + ex * factor); } FontRelativeLength::Ch(ch) => { ret.ch = Some(ret.ch.unwrap_or(0.) + ch * factor); } FontRelativeLength::Rem(rem) => { ret.rem = Some(ret.rem.unwrap_or(0.) + rem * factor); } } } NoCalcLength::ViewportPercentage(rel) => { match rel { ViewportPercentageLength::Vh(vh) => { ret.vh = Some(ret.vh.unwrap_or(0.) + vh * factor) } ViewportPercentageLength::Vw(vw) => { ret.vw = Some(ret.vw.unwrap_or(0.) + vw * factor) } ViewportPercentageLength::Vmax(vmax) => { ret.vmax = Some(ret.vmax.unwrap_or(0.) + vmax * factor) } ViewportPercentageLength::Vmin(vmin) => { ret.vmin = Some(ret.vmin.unwrap_or(0.) + vmin * factor) } } } NoCalcLength::ServoCharacterWidth(..) => unreachable!(), } } CalcNode::Sub(ref a, ref b) => { a.add_length_or_percentage_to(ret, factor)?; b.add_length_or_percentage_to(ret, factor * -1.0)?; } CalcNode::Sum(ref a, ref b) => { a.add_length_or_percentage_to(ret, factor)?; b.add_length_or_percentage_to(ret, factor)?; } CalcNode::Mul(ref a, ref b) => { match b.to_number() { Ok(rhs) => { a.add_length_or_percentage_to(ret, factor * rhs)?; } Err(..) => { let lhs = a.to_number()?; b.add_length_or_percentage_to(ret, factor * lhs)?; } } } CalcNode::Div(ref a, ref b) => { let new_factor = b.to_number()?; if new_factor == 0. { return Err(()); } a.add_length_or_percentage_to(ret, factor / new_factor)?; } CalcNode::Angle(..) | CalcNode::Time(..) | CalcNode::Number(..) => return Err(()), } Ok(()) } /// Tries to simplify this expression into a `<time>` value. fn to_time(&self) -> Result<Time, ()> { Ok(match *self { CalcNode::Time(ref time) => time.clone(), CalcNode::Sub(ref a, ref b) => { let lhs = a.to_time()?; let rhs = b.to_time()?; Time::from_calc(lhs.seconds() - rhs.seconds()) } CalcNode::Sum(ref a, ref b) => { let lhs = a.to_time()?; let rhs = b.to_time()?; Time::from_calc(lhs.seconds() + rhs.seconds()) } CalcNode::Mul(ref a, ref b) => { match b.to_number() { Ok(rhs) => { let lhs = a.to_time()?; Time::from_calc(lhs.seconds() * rhs) } Err(()) => { let lhs = a.to_number()?; let rhs = b.to_time()?; Time::from_calc(lhs * rhs.seconds()) } } } CalcNode::Div(ref a, ref b) => { let lhs = a.to_time()?; let rhs = b.to_number()?; if rhs == 0. { return Err(()) } Time::from_calc(lhs.seconds() / rhs) } CalcNode::Number(..) | CalcNode::Length(..) | CalcNode::Percentage(..) | CalcNode::Angle(..) => return Err(()), }) } /// Tries to simplify this expression into an `Angle` value. 
fn to_angle(&self) -> Result<Angle, ()> { Ok(match *self { CalcNode::Angle(ref angle) => angle.clone(), CalcNode::Sub(ref a, ref b) => { let lhs = a.to_angle()?; let rhs = b.to_angle()?; Angle::from_calc(lhs.radians() - rhs.radians()) } CalcNode::Sum(ref a, ref b) => { let lhs = a.to_angle()?; let rhs = b.to_angle()?; Angle::from_calc(lhs.radians() + rhs.radians()) } CalcNode::Mul(ref a, ref b) => { match a.to_angle() { Ok(lhs) => { let rhs = b.to_number()?; Angle::from_calc(lhs.radians() * rhs) } Err(..) => { let lhs = a.to_number()?; let rhs = b.to_angle()?; Angle::from_calc(lhs * rhs.radians()) } } } CalcNode::Div(ref a, ref b) => { let lhs = a.to_angle()?; let rhs = b.to_number()?; if rhs == 0. { return Err(()) } Angle::from_calc(lhs.radians() / rhs) } CalcNode::Number(..) | CalcNode::Length(..) | CalcNode::Percentage(..) | CalcNode::Time(..) => return Err(()), }) } /// Tries to simplify this expression into a `<number>` value. fn to_number(&self) -> Result<CSSFloat, ()> { Ok(match *self { CalcNode::Number(n) => n, CalcNode::Sum(ref a, ref b) => { a.to_number()? + b.to_number()? } CalcNode::Sub(ref a, ref b) => { a.to_number()? - b.to_number()? } CalcNode::Mul(ref a, ref b) => { a.to_number()? * b.to_number()? } CalcNode::Div(ref a, ref b) => { let lhs = a.to_number()?; let rhs = b.to_number()?; if rhs == 0. { return Err(()) } lhs / rhs } CalcNode::Length(..) | CalcNode::Percentage(..) | CalcNode::Angle(..) | CalcNode::Time(..) => return Err(()), }) } /// Convenience parsing function for integers. pub fn parse_integer<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't> ) -> Result<CSSInteger, ParseError<'i>> { Self::parse(context, input, CalcUnit::Integer)? .to_number() .map(|n| n as CSSInteger) .map_err(|()| input.new_custom_error(StyleParseErrorKind::UnspecifiedError)) } /// Convenience parsing function for `<length> | <percentage>`. pub fn parse_length_or_percentage<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, clamping_mode: AllowedNumericType ) -> Result<CalcLengthOrPercentage, ParseError<'i>> { Self::parse(context, input, CalcUnit::LengthOrPercentage)? .to_length_or_percentage(clamping_mode) .map_err(|()| input.new_custom_error(StyleParseErrorKind::UnspecifiedError)) } /// Convenience parsing function for percentages. pub fn parse_percentage<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't> ) -> Result<CSSFloat, ParseError<'i>> { Self::parse(context, input, CalcUnit::Percentage)? .to_percentage() .map_err(|()| input.new_custom_error(StyleParseErrorKind::UnspecifiedError)) } /// Convenience parsing function for `<length>`. pub fn parse_length<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, clamping_mode: AllowedNumericType ) -> Result<CalcLengthOrPercentage, ParseError<'i>> { Self::parse(context, input, CalcUnit::Length)? .to_length_or_percentage(clamping_mode) .map_err(|()| input.new_custom_error(StyleParseErrorKind::UnspecifiedError)) } /// Convenience parsing function for `<number>`. pub fn parse_number<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't> ) -> Result<CSSFloat, ParseError<'i>> { Self::parse(context, input, CalcUnit::Number)? .to_number() .map_err(|()| input.new_custom_error(StyleParseErrorKind::UnspecifiedError)) } /// Convenience parsing function for `<angle>`. pub fn parse_angle<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't> ) -> Result<Angle, ParseError<'i>> { Self::parse(context, input, CalcUnit::Angle)? 
.to_angle() .map_err(|()| input.new_custom_error(StyleParseErrorKind::UnspecifiedError)) } /// Convenience parsing function for `<time>`. pub fn parse_time<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't> ) -> Result<Time, ParseError<'i>> { Self::parse(context, input, CalcUnit::Time)? .to_time() .map_err(|()| input.new_custom_error(StyleParseErrorKind::UnspecifiedError)) } /// Convenience parsing function for `<number>` or `<percentage>`. pub fn parse_number_or_percentage<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't> ) -> Result<NumberOrPercentage, ParseError<'i>> { let node = Self::parse(context, input, CalcUnit::Percentage)?; if let Ok(value) = node.to_number() { return Ok(NumberOrPercentage::Number { value }) } match node.to_percentage() { Ok(unit_value) => Ok(NumberOrPercentage::Percentage { unit_value }), Err(()) => Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)), } } /// Convenience parsing function for `<number>` or `<angle>`. pub fn parse_angle_or_number<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't> ) -> Result<AngleOrNumber, ParseError<'i>> { let node = Self::parse(context, input, CalcUnit::Angle)?; if let Ok(angle) = node.to_angle() { let degrees = angle.degrees(); return Ok(AngleOrNumber::Angle { degrees }) } match node.to_number() { Ok(value) => Ok(AngleOrNumber::Number { value }), Err(()) => Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)), } } }<|fim▁end|>
<|file_name|>main.go<|end_file_name|><|fim▁begin|>package main func main() {<|fim▁hole|><|fim▁end|>
// Goes nowhere, does nothing }
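Editor's note: the `parse` / `parse_product` pair in calc.rs above gets operator precedence for free by building sums of products: `parse` only ever combines `+`/`-` over whole products, so `*`/`/` bind tighter without any precedence table, exactly as the doc comment's `2 * 2 + 2` example describes. A compact Python mirror of that two-level recursive descent over pre-split numeric tokens (a sketch of the technique, not Servo's implementation):

def parse_sum(tokens):
    node = parse_product(tokens)             # sums are made of products
    while tokens and tokens[0] in "+-":
        op = tokens.pop(0)
        rhs = parse_product(tokens)
        node = node + rhs if op == "+" else node - rhs
    return node

def parse_product(tokens):
    node = float(tokens.pop(0))              # products are made of factors
    while tokens and tokens[0] in "*/":
        op = tokens.pop(0)
        rhs = float(tokens.pop(0))
        node = node * rhs if op == "*" else node / rhs
    return node

print(parse_sum("2 + 3 * 4".split()))   # -> 14.0, the product binds first
print(parse_sum("2 * 2 + 2".split()))   # -> 6.0, matching the doc example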
<|file_name|>common.py<|end_file_name|><|fim▁begin|># This file is distributed under the terms of the GNU General Public license. # Copyright (C) 1999 Aloril (See the file COPYING for details). import time from mind.Goal import Goal # goals for minds def false(_): return False def true(_): return True class Delayed(Goal): """Will delay execution of sub goals until the specified time.""" def __init__(self, time: float, sub_goals: list, desc="A delayed goal."): Goal.__init__(self, desc=desc, fulfilled=self.is_right_time, sub_goals=sub_goals) self.time = time def is_right_time(self, me): # Return "false" when the time is right is_right = time.time() < self.time return is_right class OneShot(Goal): """Will remove itself after the first successful execution of its subgoals.""" def __init__(self, sub_goals, desc="Executed once."): Goal.__init__(self, desc=desc, sub_goals=sub_goals) def check_goal_recursively(self, me, depth, debug_info): res, debug_info = super().check_goal_recursively(me, depth, debug_info) if res: self.irrelevant = True return res, debug_info class DelayedOneShot(Goal): """Combines delayed execution with one shot. Useful when you want to perform one action once after a certain time.""" def __init__(self, sub_goals, desc="Executed once after a delay"): Goal.__init__(self, desc=desc, sub_goals=[OneShot(sub_goals=[Delayed(time=time.time() + 1, sub_goals=sub_goals)])]) class Condition(Goal): """ A conditional goal which first executes a function, and then sets the subgoals to one of two possibilities. If the condition function returns None then none of the subgoals will be executed. """ def __init__(self, condition_fn, goals_true, goals_false, desc="condition"):<|fim▁hole|> self.goals_false = goals_false def assess_condition(self, me): result = self.condition_fn(me) if result is None: return True if result: self.sub_goals = self.goals_true else: self.sub_goals = self.goals_false return False class Sequence(Goal): """A goal which will check on all subgoals in order.""" def __init__(self, sub_goals, desc="Sequence of goals"): Goal.__init__(self, desc=desc, sub_goals=sub_goals) def get_reach(me): reach = 0 own_reach = me.entity.get_prop_float('reach') if own_reach: reach += own_reach attached_current = me.get_attached_entity("hand_primary") if attached_current: attached_reach = attached_current.get_prop_float('reach') if attached_reach: reach += attached_reach return reach def get_focused_location(me, what): thing = get_focused_thing(me, what) if thing: return thing.location return None def get_focused_thing(me, what): focus_id = me.get_knowledge('focus', what) if focus_id is None: return None thing = me.map.get(focus_id) if thing is None: me.remove_knowledge('focus', what) return None return thing def get_task(me, task_name): """Gets the task by the name from the 'tasks' property, if it exists.""" tasks_prop = me.entity.get_prop_map('tasks') if tasks_prop and task_name in tasks_prop: return tasks_prop[task_name]<|fim▁end|>
Goal.__init__(self, desc=desc, fulfilled=self.assess_condition) self.condition_fn = condition_fn self.goals_true = goals_true
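Editor's note: the goal classes above are combinators, and `DelayedOneShot` already demonstrates the composition pattern (a `OneShot` wrapping a `Delayed`). A brief usage sketch; whether a sub-goal may be a plain callable depends on the `Goal` base class, which is not shown here, so treat the stand-in below as illustrative:

import time

def _say_hi(me):                      # minimal sub-goal stand-in
    print("hi from", me)

# Fire a single action once, roughly one second from now:
goal = DelayedOneShot(sub_goals=[_say_hi], desc="greet once")

# Equivalent explicit composition, mirroring DelayedOneShot's own body:
explicit = OneShot(sub_goals=[Delayed(time=time.time() + 1.0,
                                      sub_goals=[_say_hi])])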
<|file_name|>bitcoin_sah.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="sah" version="2.1"> <context> <name>AboutDialog</name> <message> <location filename="../forms/aboutdialog.ui" line="+14"/> <source>About altcommunitycoin</source> <translation type="unfinished"/> </message> <message> <location line="+39"/> <source>&lt;b&gt;altcommunitycoin&lt;/b&gt; version</source> <translation type="unfinished"/> </message> <message> <location line="+41"/> <source>Copyright © 2009-2014 The Bitcoin developers Copyright © 2012-2014 The NovaCoin developers Copyright © 2014 The altcommunitycoin developers</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source> This is experimental software. Distributed under the MIT/X11 software license, see the accompanying file COPYING or &lt;a href=&quot;http://www.opensource.org/licenses/mit-license.php&quot;&gt;http://www.opensource.org/licenses/mit-license.php&lt;/a&gt;. This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (&lt;a href=&quot;https://www.openssl.org/&quot;&gt;https://www.openssl.org/&lt;/a&gt;) and cryptographic software written by Eric Young (&lt;a href=&quot;mailto:[email protected]&quot;&gt;[email protected]&lt;/a&gt;) and UPnP software written by Thomas Bernard.</source> <translation type="unfinished"/> </message> </context> <context> <name>AddressBookPage</name> <message> <location filename="../forms/addressbookpage.ui" line="+14"/> <source>Address Book</source> <translation type="unfinished"/> </message> <message> <location line="+22"/> <source>Double-click to edit address or label</source> <translation>Аадырыскын уларытаргар иккитэ баттаа</translation> </message> <message> <location line="+24"/> <source>Create a new address</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Copy the currently selected address to the system clipboard</source> <translation type="unfinished"/> </message> <message> <location line="-7"/> <source>&amp;New Address</source> <translation type="unfinished"/> </message> <message> <location line="-43"/> <source>These are your altcommunitycoin addresses for receiving payments. 
You may want to give a different one to each sender so you can keep track of who is paying you.</source> <translation type="unfinished"/> </message> <message> <location line="+53"/> <source>&amp;Copy Address</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Show &amp;QR Code</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Sign a message to prove you own a altcommunitycoin address</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Delete the currently selected address from the list</source> <translation type="unfinished"/> </message> <message> <location line="-10"/> <source>Verify a message to ensure it was signed with a specified altcommunitycoin address</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Verify Message</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>&amp;Delete</source> <translation type="unfinished"/> </message> <message> <location filename="../addressbookpage.cpp" line="+66"/> <source>Copy &amp;Label</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>&amp;Edit</source> <translation type="unfinished"/> </message> <message> <location line="+248"/> <source>Export Address Book Data</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Error exporting</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation type="unfinished"/> </message> </context> <context> <name>AddressTableModel</name> <message> <location filename="../addresstablemodel.cpp" line="+145"/> <source>Label</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Address</source> <translation type="unfinished"/> </message> <message> <location line="+36"/> <source>(no label)</source> <translation type="unfinished"/> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <location filename="../forms/askpassphrasedialog.ui" line="+26"/> <source>Passphrase Dialog</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>Enter passphrase</source> <translation type="unfinished"/> </message> <message> <location line="+14"/> <source>New passphrase</source> <translation type="unfinished"/> </message> <message> <location line="+14"/> <source>Repeat new passphrase</source> <translation type="unfinished"/> </message> <message> <location line="+33"/> <source>Serves to disable the trivial sendmoney when OS account compromised. 
Provides no real security.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>For staking only</source> <translation type="unfinished"/> </message> <message> <location filename="../askpassphrasedialog.cpp" line="+38"/> <source>Encrypt wallet</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Unlock wallet</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Decrypt wallet</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Change passphrase</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Enter the old and new passphrase to the wallet.</source> <translation type="unfinished"/> </message> <message> <location line="+45"/> <source>Confirm wallet encryption</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR COINS&lt;/b&gt;!</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Are you sure you wish to encrypt your wallet?</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation type="unfinished"/> </message> <message> <location line="+103"/> <location line="+24"/> <source>Warning: The Caps Lock key is on!</source> <translation type="unfinished"/> </message> <message> <location line="-133"/> <location line="+60"/> <source>Wallet encrypted</source> <translation type="unfinished"/> </message> <message> <location line="-140"/> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;ten or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation type="unfinished"/> </message> <message> <location line="+82"/> <source>altcommunitycoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <location line="+7"/> <location line="+44"/> <location line="+6"/> <source>Wallet encryption failed</source> <translation type="unfinished"/> </message> <message> <location line="-56"/> <source>Wallet encryption failed due to an internal error. 
Your wallet was not encrypted.</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <location line="+50"/> <source>The supplied passphrases do not match.</source> <translation type="unfinished"/> </message> <message> <location line="-38"/> <source>Wallet unlock failed</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <location line="+12"/> <location line="+19"/> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation type="unfinished"/> </message> <message> <location line="-20"/> <source>Wallet decryption failed</source> <translation type="unfinished"/> </message> <message> <location line="+14"/> <source>Wallet passphrase was successfully changed.</source> <translation type="unfinished"/> </message> </context> <context> <name>BitcoinGUI</name> <message> <location filename="../bitcoingui.cpp" line="+297"/> <source>Sign &amp;message...</source> <translation type="unfinished"/> </message> <message> <location line="-64"/> <source>Show general overview of wallet</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>&amp;Transactions</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Browse transaction history</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>&amp;Address Book</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Edit the list of stored addresses and labels</source> <translation type="unfinished"/> </message> <message> <location line="-18"/> <source>Show the list of addresses for receiving payments</source> <translation type="unfinished"/> </message> <message> <location line="+34"/> <source>E&amp;xit</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Quit application</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Show information about altcommunitycoin</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>About &amp;Qt</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show information about Qt</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>&amp;Options...</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>&amp;Encrypt Wallet...</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>&amp;Backup Wallet...</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>&amp;Change Passphrase...</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>&amp;Export...</source> <translation type="unfinished"/> </message> <message> <location line="-55"/> <source>Send coins to a altcommunitycoin address</source> <translation type="unfinished"/> </message> <message> <location line="+39"/> <source>Modify configuration options for altcommunitycoin</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Export the data in the current tab to a file</source> <translation type="unfinished"/> </message> <message> <location line="-13"/> <source>Encrypt or decrypt wallet</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Backup wallet to another location</source> <translation type="unfinished"/> 
</message> <message> <location line="+2"/> <source>Change the passphrase used for wallet encryption</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>&amp;Debug window</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Open debugging and diagnostic console</source> <translation type="unfinished"/> </message> <message> <location line="-5"/> <source>&amp;Verify message...</source> <translation type="unfinished"/> </message> <message> <location line="-214"/> <location line="+555"/> <source>altcommunitycoin</source> <translation type="unfinished"/> </message> <message> <location line="-555"/> <source>Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+193"/> <source>&amp;About altcommunitycoin</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>&amp;Show / Hide</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Unlock wallet</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>&amp;Lock Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Lock wallet</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>&amp;File</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>&amp;Settings</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>&amp;Help</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Tabs toolbar</source> <translation type="unfinished"/> </message> <message> <location line="+46"/> <location line="+9"/> <source>[testnet]</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <location line="+58"/> <source>altcommunitycoin client</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+70"/> <source>%n active connection(s) to altcommunitycoin network</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+488"/> <source>Staking.&lt;br&gt;Your weight is %1&lt;br&gt;Network weight is %2&lt;br&gt;Expected time to earn reward is %3</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Not staking because wallet is locked</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Not staking because wallet is offline</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Not staking because wallet is syncing</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Not staking because you don&apos;t have mature coins</source> <translation type="unfinished"/> </message> <message> <location line="-812"/> <source>&amp;Dashboard</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>&amp;Receive</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>&amp;Send</source> <translation type="unfinished"/> </message> <message> <location line="+49"/> <source>&amp;Unlock Wallet...</source> <translation type="unfinished"/> </message> <message> <location line="+277"/> <source>Up to date</source> <translation type="unfinished"/> </message> <message> <location line="+43"/> <source>Catching up...</source> <translation 
type="unfinished"/> </message> <message> <location line="+113"/> <source>Confirm transaction fee</source> <translation type="unfinished"/> </message> <message> <location line="+27"/> <source>Sent transaction</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Incoming transaction</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Date: %1 Amount: %2 Type: %3 Address: %4 </source> <translation type="unfinished"/> </message> <message> <location line="+100"/> <location line="+15"/> <source>URI handling</source> <translation type="unfinished"/> </message> <message> <location line="-15"/> <location line="+15"/> <source>URI can not be parsed! This can be caused by an invalid altcommunitycoin address or malformed URI parameters.</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>Wallet is &lt;b&gt;not encrypted&lt;/b&gt;</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <source>Backup Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Wallet Data (*.dat)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Backup Failed</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>There was an error trying to save the wallet data to the new location.</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+91"/> <source>%n second(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n minute(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="-429"/> <location line="+433"/> <source>%n hour(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="-456"/> <source>Processed %1 blocks of transaction history.</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+27"/> <location line="+433"/> <source>%n day(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="-429"/> <location line="+6"/> <source>%n week(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+0"/> <source>%1 and %2</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+0"/> <source>%n year(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+5"/> <source>%1 behind</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Last received block was generated %1 ago.</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Transactions after this will not yet be visible.</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Error</source> 
<translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Warning</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Information</source> <translation type="unfinished"/> </message> <message> <location line="+69"/> <source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source> <translation type="unfinished"/> </message> <message> <location line="+324"/> <source>Not staking</source> <translation type="unfinished"/> </message> <message> <location filename="../bitcoin.cpp" line="+104"/> <source>A fatal error occurred. altcommunitycoin can no longer continue safely and will quit.</source> <translation type="unfinished"/> </message> </context> <context> <name>ClientModel</name> <message> <location filename="../clientmodel.cpp" line="+110"/> <source>Network Alert</source> <translation type="unfinished"/> </message> </context> <context> <name>CoinControlDialog</name> <message> <location filename="../forms/coincontroldialog.ui" line="+14"/> <source>Coin Control</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Quantity:</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>Bytes:</source> <translation type="unfinished"/> </message> <message> <location line="+48"/> <source>Amount:</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>Priority:</source> <translation type="unfinished"/> </message> <message> <location line="+48"/> <source>Fee:</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Low Output:</source> <translation type="unfinished"/> </message> <message> <location filename="../coincontroldialog.cpp" line="+537"/> <source>no</source> <translation type="unfinished"/> </message> <message> <location filename="../forms/coincontroldialog.ui" line="+51"/> <source>After Fee:</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Change:</source> <translation type="unfinished"/> </message> <message> <location line="+69"/> <source>(un)select all</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Tree mode</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>List mode</source> <translation type="unfinished"/> </message> <message> <location line="+45"/> <source>Amount</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Label</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Address</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Date</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Confirmations</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Confirmed</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Priority</source> <translation type="unfinished"/> </message> <message> <location filename="../coincontroldialog.cpp" line="-500"/> <source>Copy address</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy label</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <location 
line="+26"/> <source>Copy amount</source> <translation type="unfinished"/> </message> <message> <location line="-25"/> <source>Copy transaction ID</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <source>Copy quantity</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Copy fee</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy bytes</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy priority</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy low output</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy change</source> <translation type="unfinished"/> </message> <message> <location line="+317"/> <source>highest</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>high</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>medium-high</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>medium</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>low-medium</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>low</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>lowest</source> <translation type="unfinished"/> </message> <message> <location line="+140"/> <source>DUST</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>yes</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>This label turns red, if the transaction size is bigger than 10000 bytes. This means a fee of at least %1 per kb is required. Can vary +/- 1 Byte per input.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Transactions with higher priority get more likely into a block. This label turns red, if the priority is smaller than &quot;medium&quot;. This means a fee of at least %1 per kb is required.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This label turns red, if any recipient receives an amount smaller than %1. This means a fee of at least %2 is required. Amounts below 0.546 times the minimum relay fee are shown as DUST.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This label turns red, if the change is smaller than %1. 
This means a fee of at least %2 is required.</source> <translation type="unfinished"/> </message> <message> <location line="+36"/> <location line="+66"/> <source>(no label)</source> <translation type="unfinished"/> </message> <message> <location line="-9"/> <source>change from %1 (%2)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>(change)</source> <translation type="unfinished"/> </message> </context> <context> <name>EditAddressDialog</name> <message> <location filename="../forms/editaddressdialog.ui" line="+14"/> <source>Edit Address</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>&amp;Label</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>The label associated with this address book entry</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Address</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>The address associated with this address book entry. This can only be modified for sending addresses.</source> <translation type="unfinished"/> </message> <message> <location filename="../editaddressdialog.cpp" line="+21"/> <source>New receiving address</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>New sending address</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Edit receiving address</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Edit sending address</source> <translation type="unfinished"/> </message> <message> <location line="+76"/> <source>The entered address &quot;%1&quot; is already in the address book.</source> <translation type="unfinished"/> </message> <message> <location line="-5"/> <source>The entered address &quot;%1&quot; is not a valid altcommunitycoin address.</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Could not unlock wallet.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>New key generation failed.</source> <translation type="unfinished"/> </message> </context> <context> <name>GUIUtil::HelpMessageBox</name> <message> <location filename="../guiutil.cpp" line="+426"/> <location line="+12"/> <source>altcommunitycoin-Qt</source> <translation type="unfinished"/> </message> <message> <location line="-12"/> <source>version</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Usage:</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>command-line options</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>UI options</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Set language, for example &quot;de_DE&quot; (default: system locale)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Start minimized</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show splash screen on startup (default: 1)</source> <translation type="unfinished"/> </message> </context> <context> <name>OptionsDialog</name> <message> <location filename="../forms/optionsdialog.ui" line="+14"/> <source>Options</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>&amp;Main</source> 
<translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Pay transaction &amp;fee</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Reserved amount does not participate in staking and is therefore spendable at any time.</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Reserve</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Automatically start altcommunitycoin after logging in to the system.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Start altcommunitycoin on system login</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>&amp;Network</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Automatically open the altcommunitycoin client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Map port using &amp;UPnP</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Proxy &amp;IP:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>IP address of the proxy (e.g. 127.0.0.1)</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Port:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Port of the proxy (e.g. 9050)</source> <translation type="unfinished"/> </message> <message> <location line="-57"/> <source>Connect to the altcommunitycoin network through a SOCKS5 proxy (e.g. when connecting through Tor).</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Connect through SOCKS5 proxy:</source> <translation type="unfinished"/> </message> <message> <location line="+90"/> <source>&amp;Window</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Show only a tray icon after minimizing the window.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>M&amp;inimize on close</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>&amp;Display</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>User Interface &amp;language:</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>The user interface language can be set here. 
This setting will take effect after restarting altcommunitycoin.</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>&amp;Unit to show amounts in:</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>Whether to show coin control features or not.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Display coin &amp;control features (experts only!)</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Whether to select the coin outputs randomly or with minimal coin age.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Minimize weight consumption (experimental)</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Use black visual theme (requires restart)</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>&amp;OK</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Cancel</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>&amp;Apply</source> <translation type="unfinished"/> </message> <message> <location filename="../optionsdialog.cpp" line="+47"/> <source>default</source> <translation type="unfinished"/> </message> <message> <location line="+148"/> <location line="+9"/> <source>Warning</source> <translation type="unfinished"/> </message> <message> <location line="-9"/> <location line="+9"/> <source>This setting will take effect after restarting altcommunitycoin.</source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>The supplied proxy address is invalid.</source> <translation type="unfinished"/> </message> </context> <context> <name>OverviewPage</name> <message> <location filename="../forms/overviewpage.ui" line="+14"/> <source>Form</source> <translation type="unfinished"/> </message> <message> <location line="+46"/> <location line="+247"/> <source>The displayed information may be out of date. 
Your wallet automatically synchronizes with the altcommunitycoin network after a connection is established, but this process has not completed yet.</source> <translation type="unfinished"/> </message> <message> <location line="-173"/> <source>Stake:</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>Unconfirmed:</source> <translation type="unfinished"/> </message> <message> <location line="-113"/> <source>Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+49"/> <source>Spendable:</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Your current spendable balance</source> <translation type="unfinished"/> </message> <message> <location line="+80"/> <source>Immature:</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Mined balance that has not yet matured</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Total:</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Your current total balance</source> <translation type="unfinished"/> </message> <message> <location line="+50"/> <source>&lt;b&gt;Recent transactions&lt;/b&gt;</source> <translation type="unfinished"/> </message> <message> <location line="-118"/> <source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source> <translation type="unfinished"/> </message> <message> <location line="-32"/> <source>Total of coins that was staked, and do not yet count toward the current balance</source> <translation type="unfinished"/> </message> <message> <location filename="../overviewpage.cpp" line="+116"/> <location line="+1"/> <source>out of sync</source> <translation type="unfinished"/> </message> </context> <context> <name>PaymentServer</name> <message> <location filename="../paymentserver.cpp" line="+107"/> <source>Cannot start altcommunitycoin: click-to-pay handler</source> <translation type="unfinished"/> </message> </context> <context> <name>QRCodeDialog</name> <message> <location filename="../forms/qrcodedialog.ui" line="+14"/> <source>QR Code Dialog</source> <translation type="unfinished"/> </message> <message> <location line="+59"/> <source>Request Payment</source> <translation type="unfinished"/> </message> <message> <location line="+56"/> <source>Amount:</source> <translation type="unfinished"/> </message> <message> <location line="-44"/> <source>Label:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Message:</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>&amp;Save As...</source> <translation type="unfinished"/> </message> <message> <location filename="../qrcodedialog.cpp" line="+62"/> <source>Error encoding URI into QR Code.</source> <translation type="unfinished"/> </message> <message> <location line="+40"/> <source>The entered amount is invalid, please check.</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Save QR Code</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>PNG Images (*.png)</source> <translation type="unfinished"/> </message> </context> <context> <name>RPCConsole</name> <message> <location 
filename="../forms/rpcconsole.ui" line="+46"/> <source>Client name</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <location line="+23"/> <location line="+26"/> <location line="+23"/> <location line="+23"/> <location line="+36"/> <location line="+53"/> <location line="+23"/> <source>N/A</source> <translation type="unfinished"/> </message> <message> <location line="-194"/> <source>Client version</source> <translation type="unfinished"/> </message> <message> <location line="-45"/> <source>&amp;Information</source> <translation type="unfinished"/> </message> <message> <location line="+68"/> <source>Using OpenSSL version</source> <translation type="unfinished"/> </message> <message> <location line="+49"/> <source>Startup time</source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>Network</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Number of connections</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>On testnet</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Block chain</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Current number of blocks</source> <translation type="unfinished"/> </message> <message> <location line="+197"/> <source>&amp;Network Traffic</source> <translation type="unfinished"/> </message> <message> <location line="+52"/> <source>&amp;Clear</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Totals</source> <translation type="unfinished"/> </message> <message> <location line="+64"/> <source>In:</source> <translation type="unfinished"/> </message> <message> <location line="+80"/> <source>Out:</source> <translation type="unfinished"/> </message> <message> <location line="-383"/> <source>Last block time</source> <translation type="unfinished"/> </message> <message> <location line="+52"/> <source>&amp;Open</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Command-line options</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Show the altcommunitycoin-Qt help message to get a list with possible altcommunitycoin command-line options.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Show</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <source>&amp;Console</source> <translation type="unfinished"/> </message> <message> <location line="-237"/> <source>Build date</source> <translation type="unfinished"/> </message> <message> <location line="-104"/> <source>altcommunitycoin - Debug window</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>altcommunitycoin Core</source> <translation type="unfinished"/> </message> <message> <location line="+256"/> <source>Debug log file</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Open the altcommunitycoin debug log file from the current data directory. 
This can take a few seconds for large log files.</source> <translation type="unfinished"/> </message> <message> <location line="+102"/> <source>Clear console</source> <translation type="unfinished"/> </message> <message> <location filename="../rpcconsole.cpp" line="+325"/> <source>Welcome to the altcommunitycoin RPC console.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Use up and down arrows to navigate history, and &lt;b&gt;Ctrl-L&lt;/b&gt; to clear screen.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Type &lt;b&gt;help&lt;/b&gt; for an overview of available commands.</source> <translation type="unfinished"/> </message> <message> <location line="+127"/> <source>%1 B</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1 KB</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1 MB</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1 GB</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>%1 m</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>%1 h</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1 h %2 m</source> <translation type="unfinished"/> </message> </context> <context> <name>SendCoinsDialog</name> <message> <location filename="../forms/sendcoinsdialog.ui" line="+14"/> <location filename="../sendcoinsdialog.cpp" line="+182"/> <location line="+5"/> <location line="+5"/> <location line="+5"/> <location line="+6"/> <location line="+5"/> <location line="+5"/> <source>Send Coins</source> <translation type="unfinished"/> </message> <message> <location line="+76"/> <source>Coin Control Features</source> <translation type="unfinished"/> </message> <message> <location line="+20"/> <source>Inputs...</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>automatically selected</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Insufficient funds!</source> <translation type="unfinished"/> </message> <message> <location line="+77"/> <source>Quantity:</source> <translation type="unfinished"/> </message> <message> <location line="+22"/> <location line="+35"/> <source>0</source> <translation type="unfinished"/> </message> <message> <location line="-19"/> <source>Bytes:</source> <translation type="unfinished"/> </message> <message> <location line="+51"/> <source>Amount:</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Priority:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>medium</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>Fee:</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Low Output:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>no</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>After Fee:</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Change</source> <translation type="unfinished"/> </message> <message> <location line="+50"/> <source>custom change address</source> <translation type="unfinished"/> </message> <message> <location line="+106"/> 
<source>Send to multiple recipients at once</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Add &amp;Recipient</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Remove all transaction fields</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Clear &amp;All</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <source>Balance:</source> <translation type="unfinished"/> </message> <message> <location line="+47"/> <source>Confirm the send action</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>S&amp;end</source> <translation type="unfinished"/> </message> <message> <location filename="../sendcoinsdialog.cpp" line="-174"/> <source>Enter a altcommunitycoin address (e.g. SjBj1QvJvsAkU5EBKggdZ8gWc4oK2F5AMY)</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Copy quantity</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy fee</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy bytes</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy priority</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy low output</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy change</source> <translation type="unfinished"/> </message> <message> <location line="+87"/> <source>&lt;b&gt;%1&lt;/b&gt; to %2 (%3)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Confirm send coins</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Are you sure you want to send %1?</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source> and </source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>The recipient address is not valid, please recheck.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>The amount to pay must be larger than 0.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>The amount exceeds your balance.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Duplicate address found, can only send to each address once per send operation.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Error: Transaction creation failed!</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Error: The transaction was rejected. 
This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> <message> <location line="+247"/> <source>WARNING: Invalid altcommunitycoin address</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>(no label)</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>WARNING: unknown change address</source> <translation type="unfinished"/> </message> </context> <context> <name>SendCoinsEntry</name> <message> <location filename="../forms/sendcoinsentry.ui" line="+14"/> <source>Form</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>A&amp;mount:</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Pay &amp;To:</source> <translation type="unfinished"/> </message> <message> <location line="+34"/> <source>The address to send the payment to (e.g. SjBj1QvJvsAkU5EBKggdZ8gWc4oK2F5AMY)</source> <translation type="unfinished"/> </message> <message> <location line="+60"/> <location filename="../sendcoinsentry.cpp" line="+26"/> <source>Enter a label for this address to add it to your address book</source> <translation type="unfinished"/> </message> <message> <location line="-78"/> <source>&amp;Label:</source> <translation type="unfinished"/> </message> <message> <location line="+28"/> <source>Choose address from address book</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Alt+A</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Paste address from clipboard</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Remove this recipient</source> <translation type="unfinished"/> </message> <message> <location filename="../sendcoinsentry.cpp" line="+1"/> <source>Enter a altcommunitycoin address (e.g. SjBj1QvJvsAkU5EBKggdZ8gWc4oK2F5AMY)</source> <translation type="unfinished"/> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <location filename="../forms/signverifymessagedialog.ui" line="+14"/> <source>Signatures - Sign / Verify a Message</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <location line="+124"/> <source>&amp;Sign Message</source> <translation type="unfinished"/> </message> <message> <location line="-118"/> <source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>The address to sign the message with (e.g. 
SjBj1QvJvsAkU5EBKggdZ8gWc4oK2F5AMY)</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <location line="+203"/> <source>Choose an address from the address book</source> <translation type="unfinished"/> </message> <message> <location line="-193"/> <location line="+203"/> <source>Alt+A</source> <translation type="unfinished"/> </message> <message> <location line="-193"/> <source>Paste address from clipboard</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation type="unfinished"/> </message> <message> <location line="+12"/> <source>Enter the message you want to sign here</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <source>Copy the current signature to the system clipboard</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>Sign the message to prove you own this altcommunitycoin address</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Reset all sign message fields</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <location line="+146"/> <source>Clear &amp;All</source> <translation type="unfinished"/> </message> <message> <location line="-87"/> <location line="+70"/> <source>&amp;Verify Message</source> <translation type="unfinished"/> </message> <message> <location line="-64"/> <source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>The address the message was signed with (e.g. SjBj1QvJvsAkU5EBKggdZ8gWc4oK2F5AMY)</source> <translation type="unfinished"/> </message> <message> <location line="+40"/> <source>Verify the message to ensure it was signed with the specified altcommunitycoin address</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Reset all verify message fields</source> <translation type="unfinished"/> </message> <message> <location filename="../signverifymessagedialog.cpp" line="+27"/> <location line="+3"/> <source>Enter a altcommunitycoin address (e.g. 
SjBj1QvJvsAkU5EBKggdZ8gWc4oK2F5AMY)</source> <translation type="unfinished"/> </message> <message> <location line="-2"/> <source>Click &quot;Sign Message&quot; to generate signature</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Enter altcommunitycoin signature</source> <translation type="unfinished"/> </message> <message> <location line="+85"/> <location line="+81"/> <source>The entered address is invalid.</source> <translation type="unfinished"/> </message> <message> <location line="-81"/> <location line="+8"/> <location line="+73"/> <location line="+8"/> <source>Please check the address and try again.</source> <translation type="unfinished"/> </message> <message> <location line="-81"/> <location line="+81"/> <source>The entered address does not refer to a key.</source> <translation type="unfinished"/> </message> <message> <location line="-73"/> <source>Wallet unlock was cancelled.</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Private key for the entered address is not available.</source> <translation type="unfinished"/> </message> <message> <location line="+12"/> <source>Message signing failed.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Message signed.</source> <translation type="unfinished"/> </message> <message> <location line="+59"/> <source>The signature could not be decoded.</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <location line="+13"/> <source>Please check the signature and try again.</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>The signature did not match the message digest.</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Message verification failed.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Message verified.</source> <translation type="unfinished"/> </message> </context> <context> <name>TrafficGraphWidget</name> <message> <location filename="../trafficgraphwidget.cpp" line="+75"/> <source>KB/s</source> <translation type="unfinished"/> </message> </context> <context> <name>TransactionDesc</name> <message> <location filename="../transactiondesc.cpp" line="+25"/> <source>Open until %1</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>conflicted</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1/offline</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1/unconfirmed</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1 confirmations</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Status</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+7"/> <source>, broadcast through %n node(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+4"/> <source>Date</source> <translation type="unfinished"/> </message><|fim▁hole|> <message> <location line="+7"/> <source>Source</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Generated</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <location line="+13"/> <source>From</source> <translation type="unfinished"/> </message> 
<message> <location line="+1"/> <location line="+19"/> <location line="+58"/> <source>To</source> <translation type="unfinished"/> </message> <message> <location line="-74"/> <location line="+2"/> <source>own address</source> <translation type="unfinished"/> </message> <message> <location line="-2"/> <source>label</source> <translation type="unfinished"/> </message> <message> <location line="+34"/> <location line="+12"/> <location line="+45"/> <location line="+17"/> <location line="+30"/> <source>Credit</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="-102"/> <source>matures in %n more block(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+2"/> <source>not accepted</source> <translation type="unfinished"/> </message> <message> <location line="+44"/> <location line="+8"/> <location line="+15"/> <location line="+30"/> <source>Debit</source> <translation type="unfinished"/> </message> <message> <location line="-39"/> <source>Transaction fee</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Net amount</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Message</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Comment</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Transaction ID</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Generated coins must mature 510 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to &quot;not accepted&quot; and it won&apos;t be spendable. 
This may occasionally happen if another node generates a block within a few seconds of yours.</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Debug information</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Transaction</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Inputs</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>Amount</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>true</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>false</source> <translation type="unfinished"/> </message> <message> <location line="-202"/> <source>, has not been successfully broadcast yet</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="-36"/> <source>Open for %n more block(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+67"/> <source>unknown</source> <translation type="unfinished"/> </message> </context> <context> <name>TransactionDescDialog</name> <message> <location filename="../forms/transactiondescdialog.ui" line="+14"/> <source>Transaction details</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>This pane shows a detailed description of the transaction</source> <translation type="unfinished"/> </message> </context> <context> <name>TransactionTableModel</name> <message> <location filename="../transactiontablemodel.cpp" line="+231"/> <source>Date</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Type</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Address</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Amount</source> <translation type="unfinished"/> </message> <message> <location line="+52"/> <source>Open until %1</source> <translation type="unfinished"/> </message> <message> <location line="+12"/> <source>Confirmed (%1 confirmations)</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="-15"/> <source>Open for %n more block(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+6"/> <source>Offline</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Unconfirmed</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Confirming (%1 of %2 recommended confirmations)</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Conflicted</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Immature (%1 confirmations, will be available after %2)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>This block was not received by any other nodes and will probably not be accepted!</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Generated but not accepted</source> <translation type="unfinished"/> </message> <message> <location line="+42"/> <source>Received with</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Received from</source> <translation type="unfinished"/> </message> 
<message> <location line="+3"/> <source>Sent to</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Payment to yourself</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Mined</source> <translation type="unfinished"/> </message> <message> <location line="+38"/> <source>(n/a)</source> <translation type="unfinished"/> </message> <message> <location line="+194"/> <source>Transaction status. Hover over this field to show number of confirmations.</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Date and time that the transaction was received.</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Type of transaction.</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Destination address of transaction.</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Amount removed from or added to balance.</source> <translation type="unfinished"/> </message> </context> <context> <name>TransactionView</name> <message> <location filename="../transactionview.cpp" line="+54"/> <location line="+17"/> <source>All</source> <translation type="unfinished"/> </message> <message> <location line="-16"/> <source>Today</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This week</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This month</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Last month</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This year</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Range...</source> <translation type="unfinished"/> </message> <message> <location line="+12"/> <source>Received with</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Sent to</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>To yourself</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Mined</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Other</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Enter address or label to search</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Min amount</source> <translation type="unfinished"/> </message> <message> <location line="+34"/> <source>Copy address</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy label</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy transaction ID</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Edit label</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show transaction details</source> <translation type="unfinished"/> </message> <message> <location line="+138"/> <source>Export Transaction Data</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation type="unfinished"/> 
</message> <message> <location line="+8"/> <source>Confirmed</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Date</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Type</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Label</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Address</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Amount</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>ID</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Error exporting</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation type="unfinished"/> </message> <message> <location line="+100"/> <source>Range:</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>to</source> <translation type="unfinished"/> </message> </context> <context> <name>WalletModel</name> <message> <location filename="../walletmodel.cpp" line="+212"/> <source>Sending...</source> <translation type="unfinished"/> </message> </context> <context> <name>bitcoin-core</name> <message> <location filename="../bitcoinstrings.cpp" line="+171"/> <source>altcommunitycoin version</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Usage:</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Send command to -server or altcommunitycoind</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>List commands</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Get help for a command</source> <translation type="unfinished"/> </message> <message> <location line="-145"/> <source>Options:</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Specify configuration file (default: altcommunitycoin.conf)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Specify pid file (default: altcommunitycoind.pid)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Specify wallet file (within data directory)</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Specify data directory</source> <translation type="unfinished"/> </message> <message> <location line="-25"/> <source>%s, you must set a rpcpassword in the configuration file: %s It is recommended you use the following random password: rpcuser=altcommunitycoinrpc rpcpassword=%s (you do not need to remember this password) The username and password MUST NOT be the same. If the file does not exist, create it with owner-readable-only file permissions. 
It is also recommended to set alertnotify so you are notified of problems; for example: alertnotify=echo %%s | mail -s &quot;altcommunitycoin Alert&quot; [email protected] </source> <translation type="unfinished"/> </message> <message> <location line="+27"/> <source>Set database cache size in megabytes (default: 25)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Set database disk log size in megabytes (default: 100)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Listen for connections on &lt;port&gt; (default: 16178 or testnet: 25714)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Maintain at most &lt;n&gt; connections to peers (default: 125)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Connect to a node to retrieve peer addresses, and disconnect</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Specify your own public address</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Bind to given address. Use [host]:port notation for IPv6</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Always query for peer addresses via DNS lookup (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Threshold for disconnecting misbehaving peers (default: 100)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source> <translation type="unfinished"/> </message> <message> <location line="-35"/> <source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source> <translation type="unfinished"/> </message> <message> <location line="+62"/> <source>Listen for JSON-RPC connections on &lt;port&gt; (default: 16174 or testnet: 25715)</source> <translation type="unfinished"/> </message> <message> <location line="-16"/> <source>Accept command line and JSON-RPC commands</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Run in the background as a daemon and accept commands</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Use the test network</source> <translation type="unfinished"/> </message> <message> <location line="-23"/> <source>Accept connections from outside (default: 1 if no -proxy or -connect)</source> <translation type="unfinished"/> </message> <message> <location line="-28"/> <source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source> <translation type="unfinished"/> </message> <message> <location line="+93"/> <source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source> <translation type="unfinished"/> </message> <message> <location line="-103"/> <source>Warning: Please check that your computer&apos;s date and time are correct! 
If your clock is wrong altcommunitycoin will not work properly.</source> <translation type="unfinished"/> </message> <message> <location line="+130"/> <source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source> <translation type="unfinished"/> </message> <message> <location line="-16"/> <source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source> <translation type="unfinished"/> </message> <message> <location line="-34"/> <source>Attempt to recover private keys from a corrupt wallet.dat</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Block creation options:</source> <translation type="unfinished"/> </message> <message> <location line="-67"/> <source>Connect only to the specified node(s)</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Discover own IP address (default: 1 when listening and no -externalip)</source> <translation type="unfinished"/> </message> <message> <location line="+101"/> <source>Failed to listen on any port. Use -listen=0 if you want this.</source> <translation type="unfinished"/> </message> <message> <location line="-2"/> <source>Invalid -tor address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Invalid amount for -reservebalance=&lt;amount&gt;</source> <translation type="unfinished"/> </message> <message> <location line="-89"/> <source>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: 5000)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: 1000)</source> <translation type="unfinished"/> </message> <message> <location line="-16"/> <source>Only connect to nodes in network &lt;net&gt; (IPv4, IPv6 or Tor)</source> <translation type="unfinished"/> </message> <message> <location line="+30"/> <source>Prepend debug output with timestamp</source> <translation type="unfinished"/> </message> <message> <location line="+40"/> <source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source> <translation type="unfinished"/> </message> <message> <location line="-38"/> <source>Send trace/debug info to console instead of debug.log file</source> <translation type="unfinished"/> </message> <message> <location line="+34"/> <source>Set maximum block size in bytes (default: 250000)</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Set minimum block size in bytes (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="-34"/> <source>Shrink debug.log file on client startup (default: 1 when no -debug)</source> <translation type="unfinished"/> </message> <message> <location line="-41"/> <source>Specify connection timeout in milliseconds (default: 5000)</source> <translation type="unfinished"/> </message> <message> <location line="+28"/> <source>Use UPnP to map the listening port (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Use UPnP to map the listening port (default: 1 when listening)</source> <translation type="unfinished"/> </message> <message> <location line="-25"/> <source>Use proxy to reach tor hidden services (default: same as 
-proxy)</source> <translation type="unfinished"/> </message> <message> <location line="+45"/> <source>Username for JSON-RPC connections</source> <translation type="unfinished"/> </message> <message> <location line="+54"/> <source>Verifying database integrity...</source> <translation type="unfinished"/> </message> <message> <location line="+42"/> <source>Error: Wallet locked, unable to create transaction!</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Error: Transaction creation failed!</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Warning</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Information</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Warning: This version is obsolete, upgrade required!</source> <translation type="unfinished"/> </message> <message> <location line="-52"/> <source>wallet.dat corrupt, salvage failed</source> <translation type="unfinished"/> </message> <message> <location line="-59"/> <source>Password for JSON-RPC connections</source> <translation type="unfinished"/> </message> <message> <location line="-47"/> <source>Connect through SOCKS5 proxy</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Sync time with other nodes. Disable if time on your system is precise e.g. syncing with NTP (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+12"/> <source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Output debugging information (default: 0, supplying &lt;category&gt; is optional)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>If &lt;category&gt; is not supplied, output all debugging information.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>&lt;category&gt; can be:</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Enter regression test mode, which uses a special chain in which blocks can be solved instantly. 
This is intended for regression testing tools and app development.</source> <translation type="unfinished"/> </message>
<message> <location line="+8"/> <source>Allow JSON-RPC connections from specified IP address</source> <translation type="unfinished"/> </message>
<message> <location line="+1"/> <source>Send commands to node running on &lt;ip&gt; (default: 127.0.0.1)</source> <translation type="unfinished"/> </message>
<message> <location line="+1"/> <source>Wait for RPC server to start</source> <translation type="unfinished"/> </message>
<message> <location line="+1"/> <source>Set the number of threads to service RPC calls (default: 4)</source> <translation type="unfinished"/> </message>
<message> <location line="+1"/> <source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source> <translation type="unfinished"/> </message>
<message> <location line="+3"/> <source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source> <translation type="unfinished"/> </message>
<message> <location line="+3"/> <source>Require a confirmation for change (default: 0)</source> <translation type="unfinished"/> </message>
<message> <location line="+2"/> <source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source> <translation type="unfinished"/> </message>
<message> <location line="+3"/> <source>Upgrade wallet to latest format</source> <translation type="unfinished"/> </message>
<message> <location line="+1"/> <source>Set key pool size to &lt;n&gt; (default: 100)</source> <translation type="unfinished"/> </message>
<message> <location line="+1"/> <source>Rescan the block chain for missing wallet transactions</source> <translation type="unfinished"/> </message>
<message> <location line="+3"/> <source>How thorough the block verification is (0-6, default: 1)</source> <translation type="unfinished"/> </message>
<message> <location line="+1"/> <source>Imports blocks from external blk000?.dat file</source> <translation type="unfinished"/> </message>
<message> <location line="+1"/> <source>Keep at most &lt;n&gt; MiB of unconnectable blocks in memory (default: %u)</source> <translation type="unfinished"/> </message>
<message> <location line="+8"/> <source>Use OpenSSL (https) for JSON-RPC connections</source> <translation type="unfinished"/> </message>
<message> <location line="+1"/> <source>Server certificate file (default: server.cert)</source> <translation type="unfinished"/> </message>
<message> <location line="+1"/> <source>Server private key (default: server.pem)</source> <translation type="unfinished"/> </message>
<message> <location line="+5"/> <source>Error: Unsupported argument -socks found. Setting SOCKS version isn&apos;t possible anymore, only SOCKS5 proxies are supported.</source> <translation type="unfinished"/> </message>
<message> <location line="+8"/> <source>Initialization sanity check failed. 
altcommunitycoin is shutting down.</source> <translation type="unfinished"/> </message> <message> <location line="+20"/> <source>Error loading block database</source> <translation type="unfinished"/> </message> <message> <location line="+28"/> <source>Error: Wallet unlocked for staking only, unable to create transaction.</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Error: Disk space is low!</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source> <translation type="unfinished"/> </message> <message> <location line="-168"/> <source>This help message</source> <translation type="unfinished"/> </message> <message> <location line="+104"/> <source>Wallet %s resides outside data directory %s.</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Unable to bind to %s on this computer (bind returned error %d, %s)</source> <translation type="unfinished"/> </message> <message> <location line="-129"/> <source>Allow DNS lookups for -addnode, -seednode and -connect</source> <translation type="unfinished"/> </message> <message> <location line="+125"/> <source>Loading addresses...</source> <translation type="unfinished"/> </message> <message> <location line="-10"/> <source>Error loading wallet.dat: Wallet corrupted</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Error loading wallet.dat: Wallet requires newer version of altcommunitycoin</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Wallet needed to be rewritten: restart altcommunitycoin to complete</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Error loading wallet.dat</source> <translation type="unfinished"/> </message> <message> <location line="-15"/> <source>Invalid -proxy address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Unknown network specified in -onlynet: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Cannot resolve -bind address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Cannot resolve -externalip address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="-22"/> <source>Invalid amount for -paytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+58"/> <source>Sending...</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Invalid amount</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Insufficient funds</source> <translation type="unfinished"/> </message> <message> <location line="-40"/> <source>Loading block index...</source> <translation type="unfinished"/> </message> <message> <location line="-109"/> <source>Add a node to connect to and attempt to keep the connection open</source> <translation type="unfinished"/> </message> <message> <location line="+124"/> <source>Unable to bind to %s on this computer. 
altcommunitycoin is probably already running.</source> <translation type="unfinished"/> </message> <message> <location line="-101"/> <source>Fee per KB to add to transactions you send</source> <translation type="unfinished"/> </message> <message> <location line="+33"/> <source>Minimize weight consumption (experimental) (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>How many blocks to check at startup (default: 500, 0 = all)</source> <translation type="unfinished"/> </message> <message> <location line="+14"/> <source>Acceptable ciphers (default: TLSv1.2+HIGH:TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!3DES:@STRENGTH)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Warning: Deprecated argument -debugnet ignored, use -debug=net</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Invalid amount for -mininput=&lt;amount&gt;: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Cannot obtain a lock on data directory %s. altcommunitycoin is probably already running.</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Error initializing wallet database environment %s!</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Loading wallet...</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Cannot downgrade wallet</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Cannot write default address</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Rescanning...</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Done loading</source> <translation type="unfinished"/> </message> <message> <location line="-159"/> <source>To use the %s option</source> <translation type="unfinished"/> </message> <message> <location line="+186"/> <source>Error</source> <translation type="unfinished"/> </message> <message> <location line="-18"/> <source>You must set rpcpassword=&lt;password&gt; in the configuration file: %s If the file does not exist, create it with owner-readable-only file permissions.</source> <translation type="unfinished"/> </message> </context> </TS><|fim▁end|>
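The TS sample above stores source references as relative offsets (`<location line="+58"/>`, `<location line="-74"/>`), which only make sense when accumulated against the previously resolved location in the same context. Below is a minimal Python sketch of that resolution pass over a Qt Linguist TS file; the per-context reset is an assumption about how a consumer would walk the format, and the element/attribute names follow the sample above:

import xml.etree.ElementTree as ET

def absolute_locations(ts_path):
    # Walk each <context>, accumulating relative line="+N"/"-N" offsets.
    tree = ET.parse(ts_path)
    locations = []
    for context in tree.getroot().iter("context"):
        current_file, current_line = None, 0
        for loc in context.iter("location"):
            if loc.get("filename") is not None:
                current_file = loc.get("filename")
            line = loc.get("line", "0")
            if line.startswith(("+", "-")):
                current_line += int(line)  # relative to the previous location
            else:
                current_line = int(line)   # absolute position
            locations.append((current_file, current_line))
    return locations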
<|file_name|>hsv_rgb.hpp<|end_file_name|><|fim▁begin|>/******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #pragma once #include <kernel_headers/hsv_rgb.hpp> #include <program.hpp> #include <traits.hpp> #include <string> #include <mutex> #include <map> #include <dispatch.hpp> #include <Param.hpp> #include <debug_opencl.hpp> using cl::Buffer; using cl::Program; using cl::Kernel; using cl::KernelFunctor; using cl::EnqueueArgs; using cl::NDRange; using std::string; namespace opencl { namespace kernel { static const int THREADS_X = 16; static const int THREADS_Y = 16; template<typename T, bool isHSV2RGB> void hsv2rgb_convert(Param out, const Param in)<|fim▁hole|> try { static std::once_flag compileFlags[DeviceManager::MAX_DEVICES]; static std::map<int, Program*> hrProgs; static std::map<int, Kernel*> hrKernels; int device = getActiveDeviceId(); std::call_once( compileFlags[device], [device] () { std::ostringstream options; options << " -D T=" << dtype_traits<T>::getName(); if(isHSV2RGB) options << " -D isHSV2RGB"; if (std::is_same<T, double>::value) { options << " -D USE_DOUBLE"; } Program prog; buildProgram(prog, hsv_rgb_cl, hsv_rgb_cl_len, options.str()); hrProgs[device] = new Program(prog); hrKernels[device] = new Kernel(*hrProgs[device], "convert"); }); NDRange local(THREADS_X, THREADS_Y); int blk_x = divup(in.info.dims[0], THREADS_X); int blk_y = divup(in.info.dims[1], THREADS_Y); // all images are three channels, so batch // parameter would be along 4th dimension NDRange global(blk_x * in.info.dims[3] * THREADS_X, blk_y * THREADS_Y); auto hsvrgbOp = KernelFunctor<Buffer, KParam, Buffer, KParam, int> (*hrKernels[device]); hsvrgbOp(EnqueueArgs(getQueue(), global, local), *out.data, out.info, *in.data, in.info, blk_x); CL_DEBUG_FINISH(getQueue()); } catch (cl::Error err) { CL_TO_AF_ERROR(err); throw; } } } }<|fim▁end|>
{
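The C++ kernel above is built lazily, once per device, with the element type and conversion direction baked into the compiler options; later calls reuse the cached Kernel object. The same caching shape in a Python sketch — compile_kernel() is a hypothetical stand-in for the real OpenCL build step, and a lock substitutes for std::call_once:

import threading

_kernels = {}
_lock = threading.Lock()

def compile_kernel(name, options):
    # Placeholder for the real program build + kernel lookup.
    return (name, options)

def get_kernel(device_id, type_name, is_hsv2rgb):
    key = (device_id, type_name, is_hsv2rgb)
    with _lock:
        if key not in _kernels:  # first caller compiles, everyone else reuses
            options = "-D T=" + type_name
            if is_hsv2rgb:
                options += " -D isHSV2RGB"
            _kernels[key] = compile_kernel("convert", options)
        return _kernels[key]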
<|file_name|>test_attachment.py<|end_file_name|><|fim▁begin|>from O365 import attachment import unittest import json import base64 from random import randint att_rep = open('attachment.json','r').read() att_j = json.loads(att_rep) class TestAttachment (unittest.TestCase): def setUp(self): self.att = attachment.Attachment(att_j['value'][0]) def test_isType(self): self.assertTrue(self.att.isType('txt')) def test_getType(self): self.assertEqual(self.att.getType(),'.txt') def test_save(self): name = self.att.json['Name'] name1 = self.newFileName(name) self.att.json['Name'] = name1 self.assertTrue(self.att.save('/tmp')) with open('/tmp/'+name1,'r') as ins: f = ins.read() self.assertEqual('testing w00t!',f) name2 = self.newFileName(name) self.att.json['Name'] = name2 self.assertTrue(self.att.save('/tmp/')) with open('/tmp/'+name2,'r') as ins: f = ins.read() self.assertEqual('testing w00t!',f) def newFileName(self,val): for i in range(4): val = str(randint(0,9)) + val return val def test_getByteString(self):<|fim▁hole|> self.assertEqual(self.att.getByteString(),b'testing w00t!') def test_getBase64(self): self.assertEqual(self.att.getBase64(),'dGVzdGluZyB3MDB0IQ==\n') def test_setByteString(self): test_string = b'testing testie test' self.att.setByteString(test_string) enc = base64.encodebytes(test_string) self.assertEqual(self.att.json['ContentBytes'],enc) def setBase64(self): wrong_test_string = 'I am sooooo not base64 encoded.' right_test_string = 'Base64 <3 all around!' enc = base64.encodestring(right_test_string) self.assertRaises(self.att.setBase64(wrong_test_string)) self.assertEqual(self.att.json['ContentBytes'],'dGVzdGluZyB3MDB0IQ==\n') self.att.setBase64(enc) self.assertEqual(self.att.json['ContentBytes'],enc) if __name__ == '__main__': unittest.main()<|fim▁end|>
<|file_name|>bool.hpp<|end_file_name|><|fim▁begin|>#ifndef SC_BOOST_MPL_BOOL_HPP_INCLUDED #define SC_BOOST_MPL_BOOL_HPP_INCLUDED // Copyright Aleksey Gurtovoy 2000-2004 // // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // // See http://www.boost.org/libs/mpl for documentation. // $Source: /Users/acg/CVSROOT/systemc-2.3/src/sysc/packages/boost/mpl/bool.hpp,v $ // $Date: 2009/10/14 19:11:02 $ // $Revision: 1.2 $ #include <sysc/packages/boost/mpl/bool_fwd.hpp> #include <sysc/packages/boost/mpl/integral_c_tag.hpp> #include <sysc/packages/boost/mpl/aux_/config/static_constant.hpp><|fim▁hole|>SC_BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN template< bool C_ > struct bool_ { SC_BOOST_STATIC_CONSTANT(bool, value = C_); typedef integral_c_tag tag; typedef bool_ type; typedef bool value_type; operator bool() const { return this->value; } }; #if !defined(SC_BOOST_NO_INCLASS_MEMBER_INITIALIZATION) template< bool C_ > bool const bool_<C_>::value; #endif SC_BOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE #endif // SC_BOOST_MPL_BOOL_HPP_INCLUDED<|fim▁end|>
<|file_name|>authenticated_group_adder.go<|end_file_name|><|fim▁begin|>/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package group import ( "net/http" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" ) // AuthenticatedGroupAdder adds system:authenticated group when appropriate type AuthenticatedGroupAdder struct { // Authenticator is delegated to make the authentication decision Authenticator authenticator.Request } // NewAuthenticatedGroupAdder wraps a request authenticator, and adds the system:authenticated group when appropriate.<|fim▁hole|>// Authentication must succeed, the user must not be system:anonymous, the groups system:authenticated or system:unauthenticated must // not be present func NewAuthenticatedGroupAdder(auth authenticator.Request) authenticator.Request { return &AuthenticatedGroupAdder{auth} } func (g *AuthenticatedGroupAdder) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { u, ok, err := g.Authenticator.AuthenticateRequest(req) if err != nil || !ok { return nil, ok, err } if u.GetName() == user.Anonymous { return u, true, nil } for _, group := range u.GetGroups() { if group == user.AllAuthenticated || group == user.AllUnauthenticated { return u, true, nil } } return &user.DefaultInfo{ Name: u.GetName(), UID: u.GetUID(), Groups: append(u.GetGroups(), user.AllAuthenticated), Extra: u.GetExtra(), }, true, nil }<|fim▁end|>
<|file_name|>tesstrain.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 # (C) Copyright 2014, Google Inc. # (C) Copyright 2018, James R Barlow # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This script provides an easy way to execute various phases of training # Tesseract. For a detailed description of the phases, see # https://github.com/tesseract-ocr/tesseract/wiki/TrainingTesseract import logging import os import sys if (sys.version_info.major < 3) or (sys.version_info.major == 3 and sys.version_info.minor < 6): raise Exception("Must be using Python minimum version 3.6!") sys.path.insert(0, os.path.dirname(__file__)) from tesstrain_utils import ( parse_flags, initialize_fontconfig, phase_I_generate_image, phase_UP_generate_unicharset, phase_E_extract_features, make_lstmdata, cleanup, ) import language_specific log = logging.getLogger() def setup_logging_console(): log.setLevel(logging.DEBUG) console = logging.StreamHandler() console.setLevel(logging.INFO) console_formatter = logging.Formatter( "[%(asctime)s] %(levelname)s - %(message)s", datefmt="%H:%M:%S" ) console.setFormatter(console_formatter) log.addHandler(console) def setup_logging_logfile(logfile): logfile = logging.FileHandler(logfile) logfile.setLevel(logging.DEBUG) logfile_formatter = logging.Formatter( "[%(asctime)s] - %(levelname)s - %(name)s - %(message)s" ) logfile.setFormatter(logfile_formatter)<|fim▁hole|> setup_logging_console() ctx = parse_flags() setup_logging_logfile(ctx.log_file) if not ctx.linedata: log.error("--linedata_only is required since only LSTM is supported") sys.exit(1) log.info(f"=== Starting training for language {ctx.lang_code}") ctx = language_specific.set_lang_specific_parameters(ctx, ctx.lang_code) initialize_fontconfig(ctx) phase_I_generate_image(ctx, par_factor=8) phase_UP_generate_unicharset(ctx) if ctx.linedata: phase_E_extract_features(ctx, ["--psm", "6", "lstm.train"], "lstmf") make_lstmdata(ctx) cleanup(ctx) log.info("All done!") return 0 if __name__ == "__main__": main() # _rc0 = subprocess.call(["tlog","\n=== Starting training for language '"+str(LANG_CODE.val)+"'"],shell=True) # _rc0 = subprocess.call(["source",os.popen("dirname "+__file__).read().rstrip("\n")+"/language-specific.sh"],shell=True) # _rc0 = subprocess.call(["set_lang_specific_parameters",str(LANG_CODE.val)],shell=True) # _rc0 = subprocess.call(["initialize_fontconfig"],shell=True) # _rc0 = subprocess.call(["phase_I_generate_image","8"],shell=True) # _rc0 = subprocess.call(["phase_UP_generate_unicharset"],shell=True) # if (LINEDATA ): # subprocess.call(["phase_E_extract_features"," --psm 6 lstm.train ","8","lstmf"],shell=True) # subprocess.call(["make__lstmdata"],shell=True) # subprocess.call(["tlog","\nCreated starter traineddata for language '"+str(LANG_CODE.val)+"'\n"],shell=True) # subprocess.call(["tlog","\nRun lstmtraining to do the LSTM training for language '"+str(LANG_CODE.val)+"'\n"],shell=True) # else: # subprocess.call(["phase_D_generate_dawg"],shell=True) # 
subprocess.call(["phase_E_extract_features","box.train","8","tr"],shell=True) # subprocess.call(["phase_C_cluster_prototypes",str(TRAINING_DIR.val)+"/"+str(LANG_CODE.val)+".normproto"],shell=True) # if (str(ENABLE_SHAPE_CLUSTERING.val) == "y" ): # subprocess.call(["phase_S_cluster_shapes"],shell=True) # subprocess.call(["phase_M_cluster_microfeatures"],shell=True) # subprocess.call(["phase_B_generate_ambiguities"],shell=True) # subprocess.call(["make__traineddata"],shell=True) # subprocess.call(["tlog","\nCompleted training for language '"+str(LANG_CODE.val)+"'\n"],shell=True)<|fim▁end|>
log.addHandler(logfile) def main():
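main() in tesstrain.py above is a straight pipeline: generate images, build the unicharset, extract lstmf features, assemble the LSTM starter data, then clean up. A condensed sketch of that control flow; the phase callables here are no-op stand-ins for the real tesstrain_utils functions:

import logging

log = logging.getLogger()

def run_training(ctx, phases):
    for name, phase in phases:
        log.info('=== phase: %s', name)
        phase(ctx)

run_training(ctx={}, phases=[
    ('generate images', lambda ctx: None),
    ('generate unicharset', lambda ctx: None),
    ('extract features', lambda ctx: None),
    ('make lstmdata', lambda ctx: None),
])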
<|file_name|>TestListenerMethodBinding.java<|end_file_name|><|fim▁begin|>// Copyright 2004, 2005 The Apache Software Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package org.apache.tapestry.binding; import org.apache.hivemind.Location;<|fim▁hole|>import org.apache.tapestry.IComponent; import org.apache.tapestry.IRequestCycle; import org.apache.tapestry.PageRedirectException; import org.apache.tapestry.RedirectException; import org.apache.tapestry.coerce.ValueConverter; import org.apache.tapestry.listener.ListenerMap; /** * Test for {@link org.apache.tapestry.binding.ListenerMethodBinding}. * * @author Howard M. Lewis Ship * @since 4.0 */ public class TestListenerMethodBinding extends BindingTestCase { public void testInvokeListener() { IComponent component = newComponent(); ListenerMap map = newListenerMap(); IActionListener listener = newListener(); Location l = newLocation(); IComponent sourceComponent = newComponent(); IRequestCycle cycle = newCycle(); ValueConverter vc = newValueConverter(); trainGetListener(component, map, listener); listener.actionTriggered(sourceComponent, cycle); replayControls(); ListenerMethodBinding b = new ListenerMethodBinding("param", vc, l, component, "foo"); assertSame(b, b.getObject()); assertSame(component, b.getComponent()); b.actionTriggered(sourceComponent, cycle); verifyControls(); } public void testToString() { IComponent component = newComponent(); Location l = newLocation(); ValueConverter vc = newValueConverter(); trainGetExtendedId(component, "Fred/barney"); replayControls(); ListenerMethodBinding b = new ListenerMethodBinding("param", vc, l, component, "foo"); String toString = b.toString(); String description = toString.substring(toString.indexOf('[') + 1, toString.length() - 1); assertEquals( "param, component=Fred/barney, methodName=foo, location=classpath:/org/apache/tapestry/binding/TestListenerMethodBinding, line 1", description); verifyControls(); } public void testInvokeAndPageRedirect() { IComponent component = newComponent(); ListenerMap map = newListenerMap(); IActionListener listener = newListener(); Location l = newLocation(); ValueConverter vc = newValueConverter(); IComponent sourceComponent = newComponent(); IRequestCycle cycle = newCycle(); trainGetListener(component, map, listener); listener.actionTriggered(sourceComponent, cycle); Throwable t = new PageRedirectException("TargetPage"); setThrowable(listener, t); replayControls(); ListenerMethodBinding b = new ListenerMethodBinding("param", vc, l, component, "foo"); try { b.actionTriggered(sourceComponent, cycle); unreachable(); } catch (PageRedirectException ex) { assertSame(t, ex); } verifyControls(); } public void testInvokeAndRedirect() { IComponent component = newComponent(); ListenerMap map = newListenerMap(); IActionListener listener = newListener(); Location l = newLocation(); ValueConverter vc = newValueConverter(); IComponent sourceComponent = newComponent(); IRequestCycle cycle = newCycle(); trainGetListener(component, map, listener); 
listener.actionTriggered(sourceComponent, cycle); Throwable t = new RedirectException("http://foo.bar"); setThrowable(listener, t); replayControls(); ListenerMethodBinding b = new ListenerMethodBinding("param", vc, l, component, "foo"); try { b.actionTriggered(sourceComponent, cycle); unreachable(); } catch (RedirectException ex) { assertSame(t, ex); } verifyControls(); } public void testInvokeListenerFailure() { IComponent component = newComponent(); ListenerMap map = newListenerMap(); IActionListener listener = newListener(); Location l = newLocation(); ValueConverter vc = newValueConverter(); IComponent sourceComponent = newComponent(); IRequestCycle cycle = newCycle(); trainGetListener(component, map, listener); listener.actionTriggered(sourceComponent, cycle); Throwable t = new RuntimeException("Failure."); setThrowable(listener, t); trainGetExtendedId(component, "Fred/barney"); replayControls(); ListenerMethodBinding b = new ListenerMethodBinding("param", vc, l, component, "foo"); try { b.actionTriggered(sourceComponent, cycle); unreachable(); } catch (BindingException ex) { assertEquals( "Exception invoking listener method foo of component Fred/barney: Failure.", ex.getMessage()); assertSame(component, ex.getComponent()); assertSame(l, ex.getLocation()); assertSame(b, ex.getBinding()); } verifyControls(); } private void trainGetListener(IComponent component, ListenerMap lm, IActionListener listener) { trainGetListeners(component, lm); trainGetListener(lm, "foo", listener); } protected IRequestCycle newCycle() { return (IRequestCycle) newMock(IRequestCycle.class); } private void trainGetListener(ListenerMap map, String methodName, IActionListener listener) { map.getListener(methodName); setReturnValue(map, listener); } private void trainGetListeners(IComponent component, ListenerMap lm) { component.getListeners(); setReturnValue(component,lm); } private ListenerMap newListenerMap() { return (ListenerMap) newMock(ListenerMap.class); } private IActionListener newListener() { return (IActionListener) newMock(IActionListener.class); } }<|fim▁end|>
import org.apache.tapestry.BindingException; import org.apache.tapestry.IActionListener;
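testInvokeListenerFailure above checks the binding's contract: a listener exception is caught and re-raised as a BindingException carrying the component, location and binding. The same contract in a compact Python sketch using unittest.mock (the class and field names here are illustrative, not Tapestry's):

from unittest import mock

class BindingError(Exception):
    def __init__(self, message, component, binding):
        super().__init__(message)
        self.component = component
        self.binding = binding

def trigger(binding, component, listener, source, cycle):
    try:
        listener.action_triggered(source, cycle)
    except Exception as exc:
        raise BindingError('Exception invoking listener: %s' % exc, component, binding) from exc

listener = mock.Mock()
listener.action_triggered.side_effect = RuntimeError('Failure.')
try:
    trigger('binding', 'component', listener, 'source', 'cycle')
except BindingError as err:
    assert err.component == 'component'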
<|file_name|>query_result.rs<|end_file_name|><|fim▁begin|>use std::marker::PhantomData;

use serde::{de, Deserialize, Deserializer};
use uuid::Uuid;

/// A report about the outcome of a write.
#[derive(Debug)]
pub struct WriteStatus<T> {
    /// The number of new documents inserted. This counter is zero in case of an update or delete
    /// operation. In case of a replace operation you can have new documents inserted if you do a
    /// point-replace on a key that isn't in the table or you do a replace on a selection and one of
    /// the documents you are replacing has been deleted.
    pub inserted: u32,

    /// The number of documents that were updated or replaced. This counter is zero in case of a
    /// delete operation or an insert operation where `conflict` isn't set to "replace" or "update".
    pub replaced: u32,

    /// The number of documents that would have been modified except the new value was the same as
    /// the old value. This counter is zero in case of a delete operation or an insert operation
    /// where `conflict` is set to "error".
    pub unchanged: u32,

    /// The number of documents that were skipped because the document didn't exist. This counter is
    /// zero in case of an insert or replace operation.
    pub skipped: u32,

    /// The number of documents that were deleted. This counter is zero in case of an insert or
    /// update operation.
    ///
    /// A replace with `None` increases this counter.
    pub deleted: u32,

    /// The number of errors encountered while performing the operation.
    pub errors: u32,

    /// If errors were encountered, contains the text of the first error.
    pub first_error: String,

    /// A list of generated primary keys for inserted documents whose primary keys were not
    /// specified (capped to 100,000).
    pub generated_keys: Vec<Uuid>,

    /// If the field `generated_keys` is truncated, you will get the warning "Too many generated
    /// keys (<X>), array truncated to 100000".
    pub warnings: String,

    /// If `return_changes` is set to `true`, this will be an array of objects, one for each
    /// object affected by the `insert` operation. 
pub changes: Option<Vec<(T, T)>>, } impl<T: Deserialize> Deserialize for WriteStatus<T> { fn deserialize<D: Deserializer>(deserializer: &mut D) -> Result<Self, D::Error> { field_visitor!( enum Field {<|fim▁hole|> "skipped" => Skipped, "deleted" => Deleted, "errors" => Errors, "first_error" => FirstError, "generated_keys" => GeneratedKeys, "warnings" => Warnings, "changes" => Changes, }, FieldVisitor ); struct WriteStatusVisitor<T>(PhantomData<T>); impl<T> de::Visitor for WriteStatusVisitor<T> where T: Deserialize { type Value = WriteStatus<T>; fn visit_map<V>(&mut self, mut visitor: V) -> Result<Self::Value, V::Error> where V: de::MapVisitor { let mut inserted = None; let mut replaced = None; let mut unchanged = None; let mut skipped = None; let mut deleted = None; let mut errors = None; let mut first_error = None; let mut generated_keys = None; let mut warnings = None; let mut changes = None; while let Some(key) = try!(visitor.visit_key()) { match key { Field::Inserted => inserted = Some(try!(visitor.visit_value())), Field::Replaced => replaced = Some(try!(visitor.visit_value())), Field::Unchanged => unchanged = Some(try!(visitor.visit_value())), Field::Skipped => skipped = Some(try!(visitor.visit_value())), Field::Deleted => deleted = Some(try!(visitor.visit_value())), Field::Errors => errors = Some(try!(visitor.visit_value())), Field::FirstError => first_error = Some(try!(visitor.visit_value())), Field::GeneratedKeys => generated_keys = Some(try!(visitor.visit_value())), Field::Warnings => warnings = Some(try!(visitor.visit_value())), Field::Changes => changes = Some(try!(visitor.visit_value())), } } try!(visitor.end()); let inserted = inserted.unwrap_or(0); let replaced = replaced.unwrap_or(0); let unchanged = unchanged.unwrap_or(0); let skipped = skipped.unwrap_or(0); let deleted = deleted.unwrap_or(0); let errors = errors.unwrap_or(0); let first_error = first_error.unwrap_or_default(); let generated_keys = generated_keys.unwrap_or_default(); let warnings = warnings.unwrap_or_default(); Ok(WriteStatus { inserted: inserted, replaced: replaced, unchanged: unchanged, skipped: skipped, deleted: deleted, errors: errors, first_error: first_error, generated_keys: generated_keys, warnings: warnings, changes: changes, }) } } deserializer.deserialize(WriteStatusVisitor(PhantomData)) } }<|fim▁end|>
"inserted" => Inserted, "replaced" => Replaced, "unchanged" => Unchanged,
<|file_name|>AlertPolicyManager.java<|end_file_name|><|fim▁begin|>package com.ctrip.xpipe.redis.checker.alert.manager; import com.ctrip.xpipe.redis.checker.alert.ALERT_TYPE; import com.ctrip.xpipe.redis.checker.alert.AlertChannel; import com.ctrip.xpipe.redis.checker.alert.AlertConfig; import com.ctrip.xpipe.redis.checker.alert.AlertEntity; import com.ctrip.xpipe.redis.checker.alert.message.AlertEntityHolderManager; import com.ctrip.xpipe.redis.checker.alert.policy.channel.ChannelSelector; import com.ctrip.xpipe.redis.checker.alert.policy.channel.DefaultChannelSelector; import com.ctrip.xpipe.redis.checker.alert.policy.receiver.*; import com.ctrip.xpipe.redis.checker.alert.policy.timing.RecoveryTimeSlotControl; import com.ctrip.xpipe.redis.checker.alert.policy.timing.TimeSlotControl; import com.ctrip.xpipe.redis.checker.config.CheckerDbConfig; import com.ctrip.xpipe.redis.core.meta.MetaCache; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component;<|fim▁hole|>import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.function.LongSupplier; /** * @author chen.zhu * <p> * Oct 18, 2017 */ @Component public class AlertPolicyManager { @Autowired private AlertConfig alertConfig; @Autowired private CheckerDbConfig checkerDbConfig; @Autowired private MetaCache metaCache; private EmailReceiver emailReceiver; private GroupEmailReceiver groupEmailReceiver; private ChannelSelector channelSelector; private TimeSlotControl recoveryTimeController; @PostConstruct public void initPolicies() { emailReceiver = new DefaultEmailReceiver(alertConfig, checkerDbConfig, metaCache); groupEmailReceiver = new DefaultGroupEmailReceiver(alertConfig, checkerDbConfig, metaCache); channelSelector = new DefaultChannelSelector(); if(recoveryTimeController == null) { recoveryTimeController = new RecoveryTimeSlotControl(alertConfig); } } public List<AlertChannel> queryChannels(AlertEntity alert) { return channelSelector.alertChannels(alert); } public long queryRecoverMilli(AlertEntity alert) { return recoveryTimeController.durationMilli(alert); } public long querySuspendMilli(AlertEntity alert) { return TimeUnit.MINUTES.toMillis(alertConfig.getAlertSystemSuspendMinute()); } public EmailReceiverModel queryEmailReceivers(AlertEntity alert) { return emailReceiver.receivers(alert); } public void markCheckInterval(ALERT_TYPE alertType, LongSupplier checkInterval) { if(recoveryTimeController == null) { recoveryTimeController = new RecoveryTimeSlotControl(alertConfig); } recoveryTimeController.mark(alertType, checkInterval); } public Map<EmailReceiverModel, Map<ALERT_TYPE, Set<AlertEntity>>> queryGroupedEmailReceivers( AlertEntityHolderManager alerts) { return groupEmailReceiver.getGroupedEmailReceiver(alerts); } }<|fim▁end|>
import javax.annotation.PostConstruct; import java.util.List; import java.util.Map;
<|file_name|>shop-list.client.routes.js<|end_file_name|><|fim▁begin|>'use strict'; //Setting up route angular.module('shop-list').config(['$stateProvider', function($stateProvider) {<|fim▁hole|> $stateProvider. state('detail-product', { url: '/detail-product/:productId', templateUrl: 'modules/shop-list/views/detail-product.client.view.html' }). state('products-list', { url: '/products-list', templateUrl: 'modules/shop-list/views/products-list.client.view.html' }); } ]);<|fim▁end|>
// Shop list state routing
<|file_name|>tool_catalog.py<|end_file_name|><|fim▁begin|>import inspect from biicode.client.shell.biistream import Color class ToolCatalog(dict): def __init__(self, main_class, tools): dict.__init__(self) self.main_class = main_class # dict from tool group name to set of classes for c in tools: self[c.group] = c self.show_advanced = False def _get_doc_short(self, doc):<|fim▁hole|> def print_help(self, out, argv): out.writeln('\nSYNOPSIS:', Color.YELLOW) out.writeln(' $ bii COMMAND [options]') out.writeln('For help about a command:', Color.YELLOW) out.writeln(' $ bii COMMAND --help') out.write('To change verbosity, use options ', Color.YELLOW) out.writeln('--quiet --verbose\n') if not argv or 'all' in argv: out.writeln('--------- Global Commands ----------', Color.YELLOW) for m in inspect.getmembers(self.main_class, predicate=inspect.ismethod): method_name = m[0] if not method_name.startswith('_'): method = m[1] if not method.__doc__.startswith(' ADVANCED'): doc = method.__doc__ out.write(' %-10s' % method_name, Color.GREEN) out.writeln(self._get_doc_short(doc)) elif self.show_advanced: doc = method.__doc__.replace(' ADVANCED', '') out.write(' %-10s' % method_name, Color.GREEN) out.writeln(self._get_doc_short(doc)) if not argv: out.writeln('\n--------- Tools ----------', Color.YELLOW) out.writeln('For help about one or more tools ("all" for all):', Color.YELLOW) out.writeln(' $ bii --help TOOL [TOOL2]\n') for group, class_ in self.iteritems(): out.write(' %-10s ' % class_.group, Color.GREEN) out.writeln(class_.__doc__) else: # Tools, as main commands for group, class_ in self.iteritems(): if group not in argv and 'all' not in argv: continue out.writeln('---------%s--------' % class_.__doc__, Color.YELLOW) for m in inspect.getmembers(class_, predicate=inspect.ismethod): method_name = m[0] method = m[1] if method.__doc__: method_doc = self._get_doc_short(method.__doc__) if not method_name.startswith('_') and not method_doc.startswith('HIDDEN'): com = '%s:%s' % (group, method_name) out.write(' %-15s ' % com, Color.GREEN) out.writeln(method_doc)<|fim▁end|>
return doc.split('\n', 1)[0]
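_get_doc_short above relies on splitting a docstring at the first newline so that command listings show only the summary line. The same helper standalone, with the edge cases it depends on:

def get_doc_short(doc):
    return doc.split('\n', 1)[0]

assert get_doc_short('List commands\n\nLonger help text...') == 'List commands'
assert get_doc_short('single line') == 'single line'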
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from contextlib import contextmanager from datetime import datetime from django import forms from django.conf import settings from django.contrib.auth.models import AbstractBaseUser from django.core import validators from django.db import models from django.utils import translation from django.utils.encoding import smart_unicode from django.utils.functional import lazy import commonware.log import tower from cache_nuggets.lib import memoize from tower import ugettext as _ import amo import amo.models from amo.urlresolvers import reverse from mkt.translations.fields import NoLinksField, save_signal from mkt.translations.query import order_by_translation log = commonware.log.getLogger('z.users') class UserForeignKey(models.ForeignKey): """ A replacement for models.ForeignKey('users.UserProfile'). This field uses UserEmailField to make form fields key off the user's email instead of the primary key id. We also hook up autocomplete automatically. """ def __init__(self, *args, **kw): super(UserForeignKey, self).__init__(UserProfile, *args, **kw) def value_from_object(self, obj): return getattr(obj, self.name).email def formfield(self, **kw): defaults = {'form_class': UserEmailField} defaults.update(kw) return models.Field.formfield(self, **defaults) class UserEmailField(forms.EmailField): def clean(self, value): if value in validators.EMPTY_VALUES: raise forms.ValidationError(self.error_messages['required']) try: return UserProfile.objects.get(email=value) except UserProfile.DoesNotExist: raise forms.ValidationError(_('No user with that email.')) def widget_attrs(self, widget): lazy_reverse = lazy(reverse, str) return {'class': 'email-autocomplete', 'data-src': lazy_reverse('users.ajax')} AbstractBaseUser._meta.get_field('password').max_length = 255 class UserProfile(amo.models.OnChangeMixin, amo.models.ModelBase, AbstractBaseUser): USERNAME_FIELD = 'username' username = models.CharField(max_length=255, default='', unique=True) display_name = models.CharField(max_length=255, default='', null=True, blank=True) email = models.EmailField(unique=True, null=True) averagerating = models.CharField(max_length=255, blank=True, null=True) bio = NoLinksField(short=False) confirmationcode = models.CharField(max_length=255, default='', blank=True) deleted = models.BooleanField(default=False) display_collections = models.BooleanField(default=False) display_collections_fav = models.BooleanField(default=False) emailhidden = models.BooleanField(default=True) homepage = models.URLField(max_length=255, blank=True, default='') location = models.CharField(max_length=255, blank=True, default='') notes = models.TextField(blank=True, null=True) notifycompat = models.BooleanField(default=True) notifyevents = models.BooleanField(default=True) occupation = models.CharField(max_length=255, default='', blank=True) # This is essentially a "has_picture" flag right now picture_type = models.CharField(max_length=75, default='', blank=True) resetcode = models.CharField(max_length=255, default='', blank=True) resetcode_expires = models.DateTimeField(default=datetime.now, null=True, blank=True) read_dev_agreement = models.DateTimeField(null=True, blank=True) last_login_ip = models.CharField(default='', max_length=45, editable=False) last_login_attempt = models.DateTimeField(null=True, editable=False) last_login_attempt_ip = models.CharField(default='', max_length=45, editable=False) failed_login_attempts = models.PositiveIntegerField(default=0, editable=False) source = 
models.PositiveIntegerField(default=amo.LOGIN_SOURCE_UNKNOWN, editable=False, db_index=True) is_verified = models.BooleanField(default=True) region = models.CharField(max_length=11, null=True, blank=True, editable=False) lang = models.CharField(max_length=5, null=True, blank=True, editable=False) class Meta: db_table = 'users' def __init__(self, *args, **kw): super(UserProfile, self).__init__(*args, **kw) if self.username: self.username = smart_unicode(self.username) def __unicode__(self): return u'%s: %s' % (self.id, self.display_name or self.username) def save(self, force_insert=False, force_update=False, using=None, **kwargs): # we have to fix stupid things that we defined poorly in remora if not self.resetcode_expires: self.resetcode_expires = datetime.now() super(UserProfile, self).save(force_insert, force_update, using, **kwargs) @property def is_superuser(self): return self.groups.filter(rules='*:*').exists() @property def is_staff(self): from mkt.access import acl return acl.action_allowed_user(self, 'Admin', '%') def has_perm(self, perm, obj=None): return self.is_superuser def has_module_perms(self, app_label): return self.is_superuser def get_backend(self): return 'django_browserid.auth.BrowserIDBackend' def set_backend(self, val): pass backend = property(get_backend, set_backend) def is_anonymous(self): return False def get_url_path(self, src=None): # See: bug 880767. return '#' def my_apps(self, n=8): """Returns n apps""" qs = self.addons.filter(type=amo.ADDON_WEBAPP) qs = order_by_translation(qs, 'name') return qs[:n] @amo.cached_property def is_developer(self): return self.addonuser_set.exists() @property def name(self): return smart_unicode(self.display_name or self.username) @amo.cached_property def reviews(self): """All reviews that are not dev replies.""" qs = self._reviews_all.filter(reply_to=None) # Force the query to occur immediately. Several # reviews-related tests hang if this isn't done. return qs<|fim▁hole|> self.password = "sha512$Anonymous$Password" self.username = "Anonymous-%s" % self.id # Can't be null self.display_name = None self.homepage = "" self.deleted = True self.picture_type = "" self.save() def check_password(self, raw_password): # BrowserID does not store a password. return True def log_login_attempt(self, successful): """Log a user's login attempt""" self.last_login_attempt = datetime.now() self.last_login_attempt_ip = commonware.log.get_remote_addr() if successful: log.debug(u"User (%s) logged in successfully" % self) self.failed_login_attempts = 0 self.last_login_ip = commonware.log.get_remote_addr() else: log.debug(u"User (%s) failed to log in" % self) if self.failed_login_attempts < 16777216: self.failed_login_attempts += 1 self.save() def purchase_ids(self): """ I'm special casing this because we use purchase_ids a lot in the site and we are not caching empty querysets in cache-machine. That means that when the site is first launched we are having a lot of empty queries hit. We can probably do this in smarter fashion by making cache-machine cache empty queries on an as need basis. """ # Circular import from mkt.prices.models import AddonPurchase @memoize(prefix='users:purchase-ids') def ids(pk): return (AddonPurchase.objects.filter(user=pk) .values_list('addon_id', flat=True) .filter(type=amo.CONTRIB_PURCHASE) .order_by('pk')) return ids(self.pk) @contextmanager def activate_lang(self): """ Activate the language for the user. If none is set will go to the site default which is en-US. 
""" lang = self.lang if self.lang else settings.LANGUAGE_CODE old = translation.get_language() tower.activate(lang) yield tower.activate(old) models.signals.pre_save.connect(save_signal, sender=UserProfile, dispatch_uid='userprofile_translations') class UserNotification(amo.models.ModelBase): user = models.ForeignKey(UserProfile, related_name='notifications') notification_id = models.IntegerField() enabled = models.BooleanField(default=False) class Meta: db_table = 'users_notifications' @staticmethod def update_or_create(update={}, **kwargs): rows = UserNotification.objects.filter(**kwargs).update(**update) if not rows: update.update(dict(**kwargs)) UserNotification.objects.create(**update)<|fim▁end|>
    def anonymize(self):
        log.info(u"User (%s: <%s>) is being anonymized." % (self, self.email))
        self.email = None
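
The activate_lang context manager in the models.py sample above swaps the active translation for the duration of a block, but it restores the old language only when the body exits normally; wrapping the restore in try/finally is the safer form of the same pattern. A minimal, self-contained sketch (the module-level variable stands in for tower's thread-local state; all names here are hypothetical):

    from contextlib import contextmanager

    _active_lang = "en-US"  # stand-in for the translation thread-local

    @contextmanager
    def activate_lang(lang=None, default="en-US"):
        # Remember the current language, switch, and always restore on exit,
        # even if the managed block raises.
        global _active_lang
        old = _active_lang
        _active_lang = lang if lang else default
        try:
            yield _active_lang
        finally:
            _active_lang = old

    with activate_lang("pt-BR") as lang:
        assert lang == "pt-BR"
    assert _active_lang == "en-US"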
<|file_name|>Conversor.java<|end_file_name|><|fim▁begin|>/** Copyright 2008, 2009 UFPE - Universidade Federal de Pernambuco Este arquivo é parte do programa Amadeus Sistema de Gestão de Aprendizagem, ou simplesmente Amadeus LMS O Amadeus LMS é um software livre; você pode redistribui-lo e/ou modifica-lo dentro dos termos da Licença Pública Geral GNU como publicada pela Fundação do Software Livre (FSF); na versão 2 da Licença. Este programa é distribuído na esperança que possa ser útil, mas SEM NENHUMA GARANTIA; sem uma garantia implícita de ADEQUAÇÃO a qualquer MERCADO ou APLICAÇÃO EM PARTICULAR. Veja a Licença Pública Geral GNU para maiores detalhes. Você deve ter recebido uma cópia da Licença Pública Geral GNU, sob o título "LICENCA.txt", junto com este programa, se não, escreva para a Fundação do Software Livre (FSF) Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. **/ package br.ufpe.cin.amadeus.amadeus_mobile.util; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; import br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile; import br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Forum; import br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Game; import br.ufpe.cin.amadeus.amadeus_web.domain.content_management.LearningObject; import br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Poll; public class Conversor { /** * Method that converts AMADeUs Course object into Mobile Course object * @param curso - AMADeUs Course to be converted * @return - Converted Mobile Course object */ public static br.ufpe.cin.amadeus.amadeus_mobile.basics.CourseMobile converterCurso(br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Course curso){ br.ufpe.cin.amadeus.amadeus_mobile.basics.CourseMobile retorno = new br.ufpe.cin.amadeus.amadeus_mobile.basics.CourseMobile(); retorno.setId(curso.getId()); retorno.setName(curso.getName()); retorno.setContent(curso.getContent()); retorno.setObjectives(curso.getObjectives()); retorno.setModules(converterModulos(curso.getModules())); retorno.setKeywords(converterKeywords(curso.getKeywords())); ArrayList<String> nomes = new ArrayList<String>(); nomes.add(curso.getProfessor().getName()); retorno.setTeachers(nomes); retorno.setCount(0); retorno.setMaxAmountStudents(curso.getMaxAmountStudents()); retorno.setFinalCourseDate(curso.getFinalCourseDate()); retorno.setInitialCourseDate(curso.getInitialCourseDate()); return retorno; } /** * Method that converts a AMADeUs Course object list into Mobile Course object list * @param cursos - AMADeUs Course object list to be converted * @return - Converted Mobile Course object list */ public static List<br.ufpe.cin.amadeus.amadeus_mobile.basics.CourseMobile> converterCursos(List<br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Course> cursos){ ArrayList<br.ufpe.cin.amadeus.amadeus_mobile.basics.CourseMobile> retorno = new ArrayList<br.ufpe.cin.amadeus.amadeus_mobile.basics.CourseMobile>(); for (br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Course c : cursos){ retorno.add(Conversor.converterCurso(c)); } return retorno; } /** * Method that converts AMADeUs Module object into Mobile Module object * @param modulo - AMADeUs Module object to be converted * @return - Converted Mobile Module object */ public static br.ufpe.cin.amadeus.amadeus_mobile.basics.ModuleMobile converterModulo(br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Module modulo){ br.ufpe.cin.amadeus.amadeus_mobile.basics.ModuleMobile mod = new 
br.ufpe.cin.amadeus.amadeus_mobile.basics.ModuleMobile(modulo.getId(), modulo.getName()); List<HomeworkMobile> listHomeworks = new ArrayList<HomeworkMobile>(); for (Poll poll : modulo.getPolls()) { listHomeworks.add( Conversor.converterPollToHomework(poll) ); } for (Forum forum : modulo.getForums()) { listHomeworks.add( Conversor.converterForumToHomework(forum) ); } for(Game game : modulo.getGames()){ listHomeworks.add( Conversor.converterGameToHomework(game) ); } for(LearningObject learning : modulo.getLearningObjects()){ listHomeworks.add( Conversor.converterLearningObjectToHomework(learning) ); } mod.setHomeworks(listHomeworks); mod.setMaterials(converterMaterials(modulo.getMaterials())); return mod; } /** * Mothod that converts a AMADeUs Module object list into Mobile Module object list * @param modulos - AMADeUs Module object list to be converted * @return - Converted Mobile Module object list */ public static List<br.ufpe.cin.amadeus.amadeus_mobile.basics.ModuleMobile> converterModulos(List<br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Module> modulos){ ArrayList<br.ufpe.cin.amadeus.amadeus_mobile.basics.ModuleMobile> retorno = new ArrayList<br.ufpe.cin.amadeus.amadeus_mobile.basics.ModuleMobile>(); for (br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Module m : modulos){ retorno.add(Conversor.converterModulo(m)); } return retorno; } /** * Method that converts AMADeUs Homework object into Mobile Homework object * @param home - AMADeUs Homework object to be converted * @return - Converted Mobile Homework object */ public static br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile converterHomework(br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Homework home){ br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile retorno = new br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile(); retorno.setId(home.getId()); retorno.setName(home.getName()); retorno.setDescription(home.getDescription()); retorno.setInitDate(home.getInitDate()); retorno.setDeadline(home.getDeadline()); retorno.setAlowPostponing(home.getAllowPostponing()); retorno.setInfoExtra(""); retorno.setTypeActivity(HomeworkMobile.HOMEWORK); return retorno; } /** * Method that converts AMADeUs Game object into Mobile Homework object * @param game - AMADeUs Game object to be converted * @return - Converted Mobile Homework object */ public static br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile converterGameToHomework(br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Game game){ br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile retorno = new br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile(); retorno.setId(game.getId()); retorno.setName(game.getName()); retorno.setDescription(game.getDescription()); retorno.setInfoExtra(game.getUrl()); retorno.setTypeActivity(HomeworkMobile.GAME); return retorno; } /** * Method that converts AMADeUs Forum object into Mobile Homework object * @param forum - AMADeUs Forum object to be converted * @return - Converted Mobile Homework object */ public static br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile converterForumToHomework(br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Forum forum){ br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile retorno = new br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile(); retorno.setId(forum.getId()); retorno.setName(forum.getName()); retorno.setDescription(forum.getDescription()); retorno.setInitDate(forum.getCreationDate()); retorno.setInfoExtra(""); 
retorno.setTypeActivity(HomeworkMobile.FORUM); return retorno; } /** * Method that converts AMADeUs Poll object into Mobile Homework object * @param poll - AMADeUs Poll object to be converted * @return - Converted Mobile Homework object */ public static br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile converterPollToHomework(br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Poll poll){ br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile retorno = new br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile(); retorno.setId(poll.getId()); retorno.setName(poll.getName()); retorno.setDescription(poll.getQuestion()); retorno.setInitDate(poll.getCreationDate()); retorno.setDeadline(poll.getFinishDate()); retorno.setInfoExtra(""); retorno.setTypeActivity(HomeworkMobile.POLL); return retorno; } /** * Method that converts AMADeUs Multimedia object into Mobile Homework object * @param media - AMADeUs Multimedia object to be converted * @return - Converted Mobile Homework object */ public static br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile converterMultimediaToHomework(br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Multimedia media){ br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile retorno = new br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile(); retorno.setId(media.getId()); retorno.setName(media.getName()); retorno.setDescription(media.getDescription()); retorno.setInfoExtra(media.getUrl()); retorno.setTypeActivity(HomeworkMobile.MULTIMEDIA); return retorno; } /** * Method that converts AMADeUs Video object into Mobile Homework object * @param video - AMADeUs Video object to be converted * @return - Converted Mobile Homework object */ public static br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile converterVideoToHomework(br.ufpe.cin.amadeus.amadeus_sdmm.dao.Video video){ br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile retorno = new br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile(); retorno.setId(video.getId()); retorno.setName(video.getName()); retorno.setDescription(video.getDescription()); retorno.setInitDate(video.getDateinsertion()); retorno.setInfoExtra(video.getTags()); retorno.setTypeActivity(HomeworkMobile.VIDEO); return retorno; } public static br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile converterLearningObjectToHomework( br.ufpe.cin.amadeus.amadeus_web.domain.content_management.LearningObject learning) { br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile retorno = new br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile(); retorno.setId(learning.getId()); retorno.setName(learning.getName()); retorno.setUrl(learning.getUrl()); retorno.setDescription(learning.getDescription()); retorno.setDeadline(learning.getCreationDate()); retorno.setTypeActivity(HomeworkMobile.LEARNING_OBJECT); return retorno; } /** * Method that converts AMADeUs Homework object list into Mobile Homework object list * @param homes - AMADeUs Homework object list to be converted * @return - Converted Mobile Homework object list */ public static List<br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile> converterHomeworks(List<br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Homework> homes){ ArrayList<br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile> retorno = new ArrayList<br.ufpe.cin.amadeus.amadeus_mobile.basics.HomeworkMobile>(); for (br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Homework h : homes){ retorno.add(Conversor.converterHomework(h)); } return retorno; } /** 
<|fim▁hole|> */ public static br.ufpe.cin.amadeus.amadeus_mobile.basics.MaterialMobile converterMaterial(br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Material mat){ br.ufpe.cin.amadeus.amadeus_mobile.basics.MaterialMobile retorno = new br.ufpe.cin.amadeus.amadeus_mobile.basics.MaterialMobile(); retorno.setId(mat.getId()); retorno.setName(mat.getArchiveName()); retorno.setAuthor(converterPerson(mat.getAuthor())); retorno.setPostDate(mat.getCreationDate()); return retorno; } /** * Method that converts AMADeUs Mobile Material object list into Mobile Material object list * @param mats - AMADeUs Material object list * @return - Mobile Material object list */ public static List<br.ufpe.cin.amadeus.amadeus_mobile.basics.MaterialMobile> converterMaterials(List<br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Material> mats){ ArrayList<br.ufpe.cin.amadeus.amadeus_mobile.basics.MaterialMobile> retorno = new ArrayList<br.ufpe.cin.amadeus.amadeus_mobile.basics.MaterialMobile>(); for (br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Material mat : mats){ retorno.add(Conversor.converterMaterial(mat)); } return retorno; } /** * Method that converts AMADeUs Keyword object into Mobile Keyword object * @param key - AMADeUs Keyword object to be converted * @return - Converted Keywork object */ public static br.ufpe.cin.amadeus.amadeus_mobile.basics.KeywordMobile converterKeyword(br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Keyword key){ br.ufpe.cin.amadeus.amadeus_mobile.basics.KeywordMobile retorno = new br.ufpe.cin.amadeus.amadeus_mobile.basics.KeywordMobile(); retorno.setId(key.getId()); retorno.setName(key.getName()); retorno.setPopularity(key.getPopularity()); return retorno; } /** * Method that converts AMADeUs Keyword object list into a Mobile Keyword HashSet object * @param keys - AMADeUs Keyword object list to be converted * @return - Mobile Keywork HashSet object list */ public static HashSet<br.ufpe.cin.amadeus.amadeus_mobile.basics.KeywordMobile> converterKeywords(Set<br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Keyword> keys){ HashSet<br.ufpe.cin.amadeus.amadeus_mobile.basics.KeywordMobile> retorno = new HashSet<br.ufpe.cin.amadeus.amadeus_mobile.basics.KeywordMobile>(); for (br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Keyword k : keys){ retorno.add(Conversor.converterKeyword(k)); } return retorno; } /** * Method that converts AMADeUs Choice object into Mobile Choice object * @param ch - AMADeUs Choice object to be converted * @return - Converted Mobile Choice object */ public static br.ufpe.cin.amadeus.amadeus_mobile.basics.ChoiceMobile converterChoice(br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Choice ch){ br.ufpe.cin.amadeus.amadeus_mobile.basics.ChoiceMobile retorno = new br.ufpe.cin.amadeus.amadeus_mobile.basics.ChoiceMobile(); retorno.setId(ch.getId()); retorno.setAlternative(ch.getAlternative()); retorno.setVotes(ch.getVotes()); retorno.setPercentage(ch.getPercentage()); return retorno; } /** * Method that converts AMADeUs Choice object list into Mobile Choice object list * @param chs - AMADeUs Choice object list to be converted * @return - Converted Mobile Choice object list */ public static List<br.ufpe.cin.amadeus.amadeus_mobile.basics.ChoiceMobile> converterChoices(List<br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Choice> chs){ List<br.ufpe.cin.amadeus.amadeus_mobile.basics.ChoiceMobile> retorno = new ArrayList<br.ufpe.cin.amadeus.amadeus_mobile.basics.ChoiceMobile>(); for 
(br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Choice c : chs){ retorno.add(Conversor.converterChoice(c)); } return retorno; } /** * Method that converts AMADeUs Poll object into Mobile Poll Object * @param p - AMADeUs Poll object to be converted * @return - Converted Mobile Poll object */ public static br.ufpe.cin.amadeus.amadeus_mobile.basics.PollMobile converterPool(br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Poll p){ br.ufpe.cin.amadeus.amadeus_mobile.basics.PollMobile retorno = new br.ufpe.cin.amadeus.amadeus_mobile.basics.PollMobile(); retorno.setId(p.getId()); retorno.setName(p.getName()); retorno.setQuestion(p.getQuestion()); retorno.setInitDate(p.getCreationDate()); retorno.setFinishDate(p.getFinishDate()); retorno.setAnswered(false); retorno.setChoices(converterChoices(p.getChoices())); retorno.setAnsewered(converterAnswers(p.getAnswers())); return retorno; } public static br.ufpe.cin.amadeus.amadeus_mobile.basics.LearningObjectMobile converterLearningObject (br.ufpe.cin.amadeus.amadeus_web.domain.content_management.LearningObject learning){ br.ufpe.cin.amadeus.amadeus_mobile.basics.LearningObjectMobile retorno = new br.ufpe.cin.amadeus.amadeus_mobile.basics.LearningObjectMobile(); retorno.setId(learning.getId()); retorno.setName(learning.getName()); retorno.setDescription(learning.getDescription()); retorno.setDatePublication(learning.getCreationDate()); retorno.setUrl(learning.getUrl()); return retorno; } /** * Method that converts AMADeUs Poll object list into Mobile Poll object list * @param pls - AMADeUs Poll object list * @return - Converted Mobile object list */ public static List<br.ufpe.cin.amadeus.amadeus_mobile.basics.PollMobile> converterPools(List<br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Poll> pls){ List<br.ufpe.cin.amadeus.amadeus_mobile.basics.PollMobile> retorno = new ArrayList<br.ufpe.cin.amadeus.amadeus_mobile.basics.PollMobile>(); for (br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Poll p : pls){ retorno.add(Conversor.converterPool(p)); } return retorno; } /** * Method that converts AMADeUs Answer object into Mobile Answer object * @param ans - AMADeUs Answer object to be converted * @return - Converted Mobile Answer object */ public static br.ufpe.cin.amadeus.amadeus_mobile.basics.AnswerMobile converterAnswer(br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Answer ans){ br.ufpe.cin.amadeus.amadeus_mobile.basics.AnswerMobile retorno = new br.ufpe.cin.amadeus.amadeus_mobile.basics.AnswerMobile(); retorno.setId(ans.getId()); retorno.setAnswerDate(ans.getAnswerDate()); retorno.setPerson(converterPerson(ans.getPerson())); return retorno; } /** * Method that converts AMADeUs Answer object list into Mobile Answer object list * @param anss - AMADeUs Answer object list * @return - Converted Mobile object list */ public static List<br.ufpe.cin.amadeus.amadeus_mobile.basics.AnswerMobile> converterAnswers(List<br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Answer> anss){ List<br.ufpe.cin.amadeus.amadeus_mobile.basics.AnswerMobile> retorno = new ArrayList<br.ufpe.cin.amadeus.amadeus_mobile.basics.AnswerMobile>(); for (br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Answer an : anss){ retorno.add(Conversor.converterAnswer(an)); } return retorno; } /** * Method that converts AMADeUs Person object into Mobile Person object * @param p - AMADeUs Person object to be converted * @return - Converted Mobile Person object */ public static br.ufpe.cin.amadeus.amadeus_mobile.basics.PersonMobile 
converterPerson(br.ufpe.cin.amadeus.amadeus_web.domain.register.Person p){ return new br.ufpe.cin.amadeus.amadeus_mobile.basics.PersonMobile(p.getId(), p.getAccessInfo().getLogin(), p.getPhoneNumber()); } /** * Method that converts AMADeUs Person object list into Mobile Person object list * @param persons - AMADeUs Person object list to be converted * @return - Converted Mobile Person object list */ public static List<br.ufpe.cin.amadeus.amadeus_mobile.basics.PersonMobile> converterPersons(List<br.ufpe.cin.amadeus.amadeus_web.domain.register.Person> persons){ List<br.ufpe.cin.amadeus.amadeus_mobile.basics.PersonMobile> retorno = new ArrayList<br.ufpe.cin.amadeus.amadeus_mobile.basics.PersonMobile>(); for (br.ufpe.cin.amadeus.amadeus_web.domain.register.Person p : persons){ retorno.add(Conversor.converterPerson(p)); } return retorno; } /** * Method that converts Mobile Poll object into AMADeUs Poll Object * @param p - Mobile Poll object to be converted * @return - Converted AMADeUs Poll object */ public static br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Poll converterPool(br.ufpe.cin.amadeus.amadeus_mobile.basics.PollMobile p){ br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Poll retorno = new br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Poll(); retorno.setId(p.getId()); retorno.setName(p.getName()); retorno.setQuestion(p.getQuestion()); retorno.setCreationDate(p.getInitDate()); retorno.setFinishDate(p.getFinishDate()); retorno.setChoices(converterChoices2(p.getChoices())); retorno.setAnswers(converterAnswers2(p.getAnsewered())); return retorno; } /** * Method that converts Mobile Choice object into AMADeUs Choice object * @param ch - Mobile Choice object to be converted * @return - Converted AMADeUs Choice object */ public static br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Choice converterChoice(br.ufpe.cin.amadeus.amadeus_mobile.basics.ChoiceMobile ch){ br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Choice retorno = new br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Choice(); retorno.setId(ch.getId()); retorno.setAlternative(ch.getAlternative()); retorno.setVotes(ch.getVotes()); retorno.setPercentage(ch.getPercentage()); return retorno; } /** * Method that converts Mobile Choiceobject list into AMADeUs Choice object list * @param chs - Mobile Choice object list to be converted * @return - Converted AMADeUs Choice object list */ public static List<br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Choice> converterChoices2(List<br.ufpe.cin.amadeus.amadeus_mobile.basics.ChoiceMobile> chs){ List<br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Choice> retorno = new ArrayList<br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Choice>(); for (br.ufpe.cin.amadeus.amadeus_mobile.basics.ChoiceMobile c : chs){ retorno.add(Conversor.converterChoice(c)); } return retorno; } /** * Method that converts Mobile Answer object into AMADeUs Answer object * @param ans - Mobile Answer object to be converted * @return - Converted AMADeUs Answer object */ public static br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Answer converterAnswer(br.ufpe.cin.amadeus.amadeus_mobile.basics.AnswerMobile ans){ br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Answer retorno = new br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Answer(); retorno.setId(ans.getId()); retorno.setAnswerDate(ans.getAnswerDate()); retorno.setPerson(converterPerson(ans.getPerson())); return retorno; } /** * Method that 
converts Mobile Answer object list into AMADeUs Answer object list * @param anss - Mobile Answer object list * @return - Converted AMADeUs Answer object list */ public static List<br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Answer> converterAnswers2(List<br.ufpe.cin.amadeus.amadeus_mobile.basics.AnswerMobile> anss){ List<br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Answer> retorno = new ArrayList<br.ufpe.cin.amadeus.amadeus_web.domain.content_management.Answer>(); for (br.ufpe.cin.amadeus.amadeus_mobile.basics.AnswerMobile an : anss){ retorno.add(Conversor.converterAnswer(an)); } return retorno; } /** * Method that converts Mobile Person object into AMADeUs Person object * @param p - Mobile Person object to be converted * @return - Converted AMADeUs Person object */ public static br.ufpe.cin.amadeus.amadeus_web.domain.register.Person converterPerson(br.ufpe.cin.amadeus.amadeus_mobile.basics.PersonMobile p){ br.ufpe.cin.amadeus.amadeus_web.domain.register.Person p1 = new br.ufpe.cin.amadeus.amadeus_web.domain.register.Person(); p1.setId(p.getId()); p1.setName(p.getName()); p1.setPhoneNumber(p.getPhoneNumber()); return p1; } }<|fim▁end|>
 * Method that converts AMADeUs Material object into Mobile Material object
 * @param mat - AMADeUs Material object to be converted
 * @return - Mobile Material object converted
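
The Conversor class above maps each web-side domain object onto a trimmed mobile DTO field by field, one static converter per type plus a list variant built on it. The same pattern, sketched in Python for brevity (both classes are hypothetical stand-ins, not the AMADeUs types):

    # Hypothetical stand-ins for the web-side entity and the mobile DTO.
    class Material(object):
        def __init__(self, id, archive_name, creation_date):
            self.id = id
            self.archive_name = archive_name
            self.creation_date = creation_date

    class MaterialMobile(object):
        def __init__(self, id=None, name=None, post_date=None):
            self.id = id
            self.name = name
            self.post_date = post_date

    def converter_material(mat):
        # Copy only what the client needs, renaming fields along the way.
        return MaterialMobile(id=mat.id, name=mat.archive_name,
                              post_date=mat.creation_date)

    def converter_materials(mats):
        return [converter_material(m) for m in mats]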
<|file_name|>gateway_tcp.py<|end_file_name|><|fim▁begin|>"""Start a tcp gateway."""
import click

from mysensors.cli.helper import (
    common_gateway_options,
    handle_msg,
    run_async_gateway,
    run_gateway,
)
from mysensors.gateway_tcp import AsyncTCPGateway, TCPGateway


def common_tcp_options(func):
    """Supply common tcp gateway options."""<|fim▁hole|>
        "--port",
        default=5003,
        show_default=True,
        type=int,
        help="TCP port of the connection.",
    )(func)
    func = click.option(
        "-H", "--host", required=True, help="TCP address of the gateway."
    )(func)
    return func


@click.command(options_metavar="<options>")
@common_tcp_options
@common_gateway_options
def tcp_gateway(**kwargs):
    """Start a tcp gateway."""
    gateway = TCPGateway(event_callback=handle_msg, **kwargs)
    run_gateway(gateway)


@click.command(options_metavar="<options>")
@common_tcp_options
@common_gateway_options
def async_tcp_gateway(**kwargs):
    """Start an async tcp gateway."""
    gateway = AsyncTCPGateway(event_callback=handle_msg, **kwargs)
    run_async_gateway(gateway)<|fim▁end|>
    func = click.option(
        "-p",
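
The completion above works because click.option(...) returns a decorator, so a helper like common_tcp_options can attach a shared option group to any command by calling those decorators on the wrapped function. A small usage sketch with the real click API (the show command itself is hypothetical):

    import click

    def common_tcp_options(func):
        # Each click.option(...) call returns a decorator; applying them in
        # sequence attaches both parameters to the wrapped command.
        func = click.option("-p", "--port", default=5003, show_default=True,
                            type=int, help="TCP port of the connection.")(func)
        func = click.option("-H", "--host", required=True,
                            help="TCP address of the gateway.")(func)
        return func

    @click.command()
    @common_tcp_options
    def show(host, port):
        """Print the configured endpoint (hypothetical demo command)."""
        click.echo("%s:%s" % (host, port))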
<|file_name|>circle_urls.py<|end_file_name|><|fim▁begin|><|fim▁hole|>#!/usr/bin/env python

'''
circle_urls.py will rename all url files to not have extension .html
'''

import sys
import os
from glob import glob

site_dir = os.path.abspath(sys.argv[1])
print("Using site directory %s" %(site_dir))
files = glob("%s/*.html" %(site_dir))

# For each file, we need to replace all links to have correct .html extension
search_names = [os.path.basename(f).replace('.html','') for f in files]

for html_file in files:
    with open(html_file,'r') as filey:
        content = filey.read()
    for search_name in search_names:
        content = content.replace('%s"' %(search_name),'%s.html"' %(search_name))
    content = content.replace('/images/logo/logo.png','https://sci-f.github.io/apps/assets/img/logo/logo.png')
    with open(html_file,'w') as filey:
        filey.write(content)<|fim▁end|>
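
The circle_urls.py script above rewrites page references in place; the same loop reads more clearly factored into a function. A sketch under the same assumptions (pages are referenced as a bare name followed by a closing quote, e.g. href="page"):

    import os
    from glob import glob

    def add_html_extension(site_dir):
        # The closing quote in '%s"' anchors each match, so the name 'page'
        # does not also rewrite 'pages' or an already-suffixed 'page.html'.
        files = glob("%s/*.html" % site_dir)
        names = [os.path.basename(f).replace('.html', '') for f in files]
        for path in files:
            with open(path, 'r') as fh:
                content = fh.read()
            for name in names:
                content = content.replace('%s"' % name, '%s.html"' % name)
            with open(path, 'w') as fh:
                fh.write(content)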
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>def get_all_styles():
    """
    Returns previously registered by richtemplates at
    ``richtemplates.settings.REGISTERED_PYGMENTS_STYLES``.
    """
    from richtemplates.settings import REGISTERED_PYGMENTS_STYLES
    return REGISTERED_PYGMENTS_STYLES

def get_style(alias):
    """
    Returns pygments style class. Available styles may be retrieved using<|fim▁hole|>
<|file_name|>status.py<|end_file_name|><|fim▁begin|>from gettext import gettext as _ import traceback from pulp.client.commands.repo.sync_publish import StatusRenderer from pulp.client.extensions.core import COLOR_FAILURE from pulp_puppet.common import constants from pulp_puppet.common.publish_progress import PublishProgressReport from pulp_puppet.common.sync_progress import SyncProgressReport class PuppetStatusRenderer(StatusRenderer): def __init__(self, context): super(PuppetStatusRenderer, self).__init__(context) # Sync Steps self.sync_metadata_last_state = constants.STATE_NOT_STARTED self.sync_modules_last_state = constants.STATE_NOT_STARTED # Publish Steps self.publish_modules_last_state = constants.STATE_NOT_STARTED self.publish_metadata_last_state = constants.STATE_NOT_STARTED self.publish_http_last_state = constants.STATE_NOT_STARTED self.publish_https_last_state = constants.STATE_NOT_STARTED # UI Widgets self.sync_metadata_bar = self.prompt.create_progress_bar() self.sync_modules_bar = self.prompt.create_progress_bar() self.publish_modules_bar = self.prompt.create_progress_bar() self.publish_metadata_spinner = self.prompt.create_spinner() def display_report(self, progress_report): # Sync Steps if constants.IMPORTER_ID in progress_report: sync_report = SyncProgressReport.from_progress_dict(progress_report[constants.IMPORTER_ID]) self._display_sync_metadata_step(sync_report) self._display_sync_modules_step(sync_report) # Publish Steps if constants.DISTRIBUTOR_ID in progress_report: publish_report = PublishProgressReport.from_progress_dict(progress_report[constants.DISTRIBUTOR_ID]) self._display_publish_modules_step(publish_report) self._display_publish_metadata_step(publish_report) self._display_publish_http_https_step(publish_report) def _display_sync_metadata_step(self, sync_report): # Do nothing if it hasn't started yet or has already finished if sync_report.metadata_state == constants.STATE_NOT_STARTED or \ self.sync_metadata_last_state in constants.COMPLETE_STATES: return # Only render this on the first non-not-started state if self.sync_metadata_last_state == constants.STATE_NOT_STARTED: self.prompt.write(_('Downloading metadata...'), tag='download-metadata') # Same behavior for running or success if sync_report.metadata_state in (constants.STATE_RUNNING, constants.STATE_SUCCESS): items_done = sync_report.metadata_query_finished_count items_total = sync_report.metadata_query_total_count item_type = _('Metadata Query') self._render_itemized_in_progress_state(items_done, items_total, item_type, self.sync_metadata_bar, sync_report.metadata_state) # The only state left to handle is if it failed else: self.prompt.render_failure_message(_('... 
failed')) self.prompt.render_spacer() self._render_error(sync_report.metadata_error_message, sync_report.metadata_exception, sync_report.metadata_traceback) # Before finishing update the state self.sync_metadata_last_state = sync_report.metadata_state def _display_sync_modules_step(self, sync_report): # Do nothing if it hasn't started yet or has already finished if sync_report.modules_state == constants.STATE_NOT_STARTED or \ self.sync_modules_last_state in constants.COMPLETE_STATES: return # Only render this on the first non-not-started state if self.sync_modules_last_state == constants.STATE_NOT_STARTED: self.prompt.write(_('Downloading new modules...'), tag='downloading') # Same behavior for running or success if sync_report.modules_state in (constants.STATE_RUNNING, constants.STATE_SUCCESS): items_done = sync_report.modules_finished_count + sync_report.modules_error_count items_total = sync_report.modules_total_count item_type = _('Module') self._render_itemized_in_progress_state(items_done, items_total, item_type, self.sync_modules_bar, sync_report.modules_state) # The only state left to handle is if it failed else: self.prompt.render_failure_message(_('... failed')) self.prompt.render_spacer() self._render_error(sync_report.modules_error_message, sync_report.modules_exception, sync_report.modules_traceback) # Regardless of success or failure, display any individual module errors # if the new state is complete if sync_report.modules_state in constants.COMPLETE_STATES: self._render_module_errors(sync_report.modules_individual_errors) # Before finishing update the state self.sync_modules_last_state = sync_report.modules_state def _display_publish_modules_step(self, publish_report): # Do nothing if it hasn't started yet or has already finished if publish_report.modules_state == constants.STATE_NOT_STARTED or \ self.publish_modules_last_state in constants.COMPLETE_STATES: return # Only render this on the first non-not-started state if self.publish_modules_last_state == constants.STATE_NOT_STARTED: self.prompt.write(_('Publishing modules...'), tag='publishing') # Same behavior for running or success if publish_report.modules_state in (constants.STATE_RUNNING, constants.STATE_SUCCESS): items_done = publish_report.modules_finished_count + publish_report.modules_error_count items_total = publish_report.modules_total_count item_type = _('Module') self._render_itemized_in_progress_state(items_done, items_total, item_type, self.publish_modules_bar, publish_report.modules_state) # The only state left to handle is if it failed else: self.prompt.render_failure_message(_('... 
failed')) self.prompt.render_spacer() self._render_error(publish_report.modules_error_message, publish_report.modules_exception, publish_report.modules_traceback) # Regardless of success or failure, display any individual module errors # if the new state is complete if publish_report.modules_state in constants.COMPLETE_STATES: self._render_module_errors(publish_report.modules_individual_errors) # Before finishing update the state self.publish_modules_last_state = publish_report.modules_state def _display_publish_metadata_step(self, publish_report): # Do nothing if it hasn't started yet or has already finished if publish_report.metadata_state == constants.STATE_NOT_STARTED or \ self.publish_metadata_last_state in constants.COMPLETE_STATES: return # Only render this on the first non-not-started state if self.publish_metadata_last_state == constants.STATE_NOT_STARTED: self.prompt.write(_('Generating repository metadata...'), tag='generating') if publish_report.metadata_state == constants.STATE_RUNNING: self.publish_metadata_spinner.next() elif publish_report.metadata_state == constants.STATE_SUCCESS: self.publish_metadata_spinner.next(finished=True) self.prompt.write(_('... completed'), tag='completed') self.prompt.render_spacer() elif publish_report.metadata_state == constants.STATE_FAILED: self.publish_metadata_spinner.next(finished=True) self.prompt.render_failure_message(_('... failed')) self.prompt.render_spacer() self._render_error(publish_report.modules_error_message, publish_report.modules_exception, publish_report.modules_traceback) self.publish_metadata_last_state = publish_report.metadata_state def _display_publish_http_https_step(self, publish_report): # -- HTTP -------- if publish_report.publish_http != constants.STATE_NOT_STARTED and \ self.publish_http_last_state not in constants.COMPLETE_STATES: self.prompt.write(_('Publishing repository over HTTP...')) if publish_report.publish_http == constants.STATE_SUCCESS: self.prompt.write(_('... completed'), tag='http-completed') elif publish_report.publish_http == constants.STATE_SKIPPED: self.prompt.write(_('... skipped'), tag='http-skipped') else: self.prompt.write(_('... unknown'), tag='http-unknown') self.publish_http_last_state = publish_report.publish_http self.prompt.render_spacer() # -- HTTPS -------- if publish_report.publish_https != constants.STATE_NOT_STARTED and \ self.publish_https_last_state not in constants.COMPLETE_STATES: self.prompt.write(_('Publishing repository over HTTPS...')) if publish_report.publish_https == constants.STATE_SUCCESS: self.prompt.write(_('... completed'), tag='https-completed') elif publish_report.publish_https == constants.STATE_SKIPPED: self.prompt.write(_('... skipped'), tag='https-skipped') else: self.prompt.write(_('... unknown'), tag='https-unknown') self.publish_https_last_state = publish_report.publish_https def _render_itemized_in_progress_state(self, items_done, items_total, type_name, progress_bar, current_state): """ This is a pretty ugly way of reusing similar code between the publish steps for packages and distributions. There might be a cleaner way but I was having trouble updating the correct state variable and frankly I'm out of time. Feel free to fix this if you are inspired. """ # For the progress bar to work, we can't write anything after it until # we're completely finished with it. Assemble the download summary into # a string and let the progress bar render it. 
message_data = { 'name' : type_name.title(), 'items_done' : items_done, 'items_total' : items_total, } template = _('%(name)s: %(items_done)s/%(items_total)s items') bar_message = template % message_data # If there's nothing to download in this step, flag the bar as complete if items_total is 0: items_total = items_done = 1 progress_bar.render(items_done, items_total, message=bar_message) if current_state == constants.STATE_SUCCESS: self.prompt.write(_('... completed')) self.prompt.render_spacer() def _render_module_errors(self, individual_errors): """ :param individual_errors: dictionary where keys are module names and values are dicts with keys 'exception' and 'traceback'. :type individual_errors: dict """ if individual_errors: # TODO: read this from config display_error_count = 20 self.prompt.render_failure_message(_('Could not import the following modules:')) for module_error in individual_errors[:display_error_count]: msg = _(' %(module)s: %(error)s') msg = msg % {'module': module_error['module'], 'error': module_error['exception']} self.prompt.write(msg, color=COLOR_FAILURE)<|fim▁hole|> def _render_error(self, error_message, exception, traceback): msg = _('The following error was encountered during the previous ' 'step. More information can be found by passing -v flag one or more times') self.prompt.render_failure_message(msg) self.prompt.render_spacer() self.prompt.render_failure_message(' %s' % error_message) self.context.logger.error(error_message) self.context.logger.error(exception) self.context.logger.error(traceback)<|fim▁end|>
self.prompt.render_spacer()
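
_render_itemized_in_progress_state in the status.py record above tests "if items_total is 0"; that only happens to work because CPython interns small integers, and "== 0" is the correct comparison. A minimal sketch of the same bar-message assembly with that fix (illustrative only, not the pulp_puppet implementation):

    def bar_message(type_name, items_done, items_total):
        # Use ==, not 'is': identity against an int literal is an
        # implementation detail of CPython's small-integer cache.
        message = "%s: %s/%s items" % (type_name.title(), items_done, items_total)
        if items_total == 0:
            items_done = items_total = 1  # render the bar as complete when empty
        return message, items_done, items_total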
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from setuptools import setup, find_packages XMODULES = [ "abtest = xmodule.abtest_module:ABTestDescriptor", "book = xmodule.backcompat_module:TranslateCustomTagDescriptor", "chapter = xmodule.seq_module:SequenceDescriptor", "combinedopenended = xmodule.combined_open_ended_module:CombinedOpenEndedDescriptor", "conditional = xmodule.conditional_module:ConditionalDescriptor", "course = xmodule.course_module:CourseDescriptor", "customtag = xmodule.template_module:CustomTagDescriptor", "discuss = xmodule.backcompat_module:TranslateCustomTagDescriptor", "html = xmodule.html_module:HtmlDescriptor", "image = xmodule.backcompat_module:TranslateCustomTagDescriptor", "error = xmodule.error_module:ErrorDescriptor", "peergrading = xmodule.peer_grading_module:PeerGradingDescriptor", "poll_question = xmodule.poll_module:PollDescriptor", "problem = xmodule.capa_module:CapaDescriptor", "problemset = xmodule.seq_module:SequenceDescriptor", "randomize = xmodule.randomize_module:RandomizeDescriptor", "section = xmodule.backcompat_module:SemanticSectionDescriptor", "sequential = xmodule.seq_module:SequenceDescriptor", "slides = xmodule.backcompat_module:TranslateCustomTagDescriptor", "vertical = xmodule.vertical_module:VerticalDescriptor", "video = xmodule.video_module:VideoDescriptor", "videoalpha = xmodule.video_module:VideoDescriptor", "videodev = xmodule.backcompat_module:TranslateCustomTagDescriptor", "videosequence = xmodule.seq_module:SequenceDescriptor", "discussion = xmodule.discussion_module:DiscussionDescriptor", "course_info = xmodule.html_module:CourseInfoDescriptor", "static_tab = xmodule.html_module:StaticTabDescriptor", "custom_tag_template = xmodule.raw_module:RawDescriptor", "about = xmodule.html_module:AboutDescriptor", "wrapper = xmodule.wrapper_module:WrapperDescriptor", "graphical_slider_tool = xmodule.gst_module:GraphicalSliderToolDescriptor", "annotatable = xmodule.annotatable_module:AnnotatableDescriptor", "textannotation = xmodule.textannotation_module:TextAnnotationDescriptor", "videoannotation = xmodule.videoannotation_module:VideoAnnotationDescriptor", "foldit = xmodule.foldit_module:FolditDescriptor", "word_cloud = xmodule.word_cloud_module:WordCloudDescriptor", "hidden = xmodule.hidden_module:HiddenDescriptor", "raw = xmodule.raw_module:RawDescriptor", "crowdsource_hinter = xmodule.crowdsource_hinter:CrowdsourceHinterDescriptor", "lti = xmodule.lti_module:LTIDescriptor", ] setup( name="XModule", version="0.1", packages=find_packages(exclude=["tests"]), install_requires=[ 'distribute', 'docopt', 'capa', 'path.py', 'webob', ], package_data={ 'xmodule': ['js/module/*'], }, # See http://guide.python-distribute.org/creation.html#entry-points # for a description of entry_points entry_points={ 'xblock.v1': XMODULES, 'xmodule.v1': XMODULES,<|fim▁hole|> 'console_scripts': [ 'xmodule_assets = xmodule.static_content:main', ], }, )<|fim▁end|>
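
The setup.py record above registers every descriptor under both the xblock.v1 and xmodule.v1 entry-point groups, so consumers can discover them by group name at runtime. A minimal sketch of how one such registration looks on its own (package and class names are hypothetical; the entry_points mechanism is standard setuptools):

    from setuptools import setup, find_packages

    setup(
        name="example-xmodule-pkg",  # hypothetical package name
        version="0.1",
        packages=find_packages(exclude=["tests"]),
        entry_points={
            # Each "name = module.path:ClassName" string is parsed by
            # setuptools and can later be discovered with
            # pkg_resources.iter_entry_points("xblock.v1").
            "xblock.v1": [
                "example = example.block:ExampleDescriptor",
            ],
        },
    )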
<|file_name|>rijndael_test.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python """ cryptopy.cipher.rijndael_test Tests for the rijndael encryption algorithm Copyright (c) 2002 by Paul A. Lambert Read LICENSE.txt for license information. """ from cryptopy.cipher.rijndael import Rijndael from cryptopy.cipher.base import noPadding from binascii import a2b_hex import unittest class Rijndael_TestVectors(unittest.TestCase): """ Test Rijndael algorithm using know values.""" def testGladman_dev_vec(self): """ All 25 combinations of block and key size. These test vectors were generated by Dr Brian Gladman using the program aes_vec.cpp <[email protected]> 24th May 2001. vectors in file: dev_vec.txt http://fp.gladman.plus.com/cryptography_technology/rijndael/index.htm """ def RijndaelTestVec(i, key, pt, ct): """ Run single AES test vector with any legal blockSize and any legal key size. """ bkey, plainText, cipherText = a2b_hex(key), a2b_hex(pt), a2b_hex(ct) kSize = len(bkey) bSize = len(cipherText) # set block size to length of block alg = Rijndael(bkey, keySize=kSize, blockSize=bSize, padding=noPadding()) self.assertEqual( alg.encrypt(plainText), cipherText ) self.assertEqual( alg.decrypt(cipherText), plainText ) RijndaelTestVec( i = 'dev_vec.txt 16 byte block, 16 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c', pt = '3243f6a8885a308d313198a2e0370734', ct = '3925841d02dc09fbdc118597196a0b32') RijndaelTestVec( i = 'dev_vec.txt 16 byte block, 20 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c762e7160', pt = '3243f6a8885a308d313198a2e0370734', ct = '231d844639b31b412211cfe93712b880') RijndaelTestVec( i = 'dev_vec.txt 16 byte block, 24 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c762e7160f38b4da5', pt = '3243f6a8885a308d313198a2e0370734', ct = 'f9fb29aefc384a250340d833b87ebc00') RijndaelTestVec( i = 'dev_vec.txt 16 byte block, 28 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c762e7160f38b4da56a784d90', pt = '3243f6a8885a308d313198a2e0370734', ct = '8faa8fe4dee9eb17caa4797502fc9d3f') RijndaelTestVec( i = 'dev_vec.txt 16 byte block, 32 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c762e7160f38b4da56a784d9045190cfe', pt = '3243f6a8885a308d313198a2e0370734', ct = '1a6e6c2c662e7da6501ffb62bc9e93f3')<|fim▁hole|> key = '2b7e151628aed2a6abf7158809cf4f3c', pt = '3243f6a8885a308d313198a2e03707344a409382', ct = '16e73aec921314c29df905432bc8968ab64b1f51') RijndaelTestVec( i = 'dev_vec.txt 20 byte block, 20 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c762e7160', pt = '3243f6a8885a308d313198a2e03707344a409382', ct = '0553eb691670dd8a5a5b5addf1aa7450f7a0e587') RijndaelTestVec( i = 'dev_vec.txt 20 byte block, 24 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c762e7160f38b4da5', pt = '3243f6a8885a308d313198a2e03707344a409382', ct = '73cd6f3423036790463aa9e19cfcde894ea16623') RijndaelTestVec( i = 'dev_vec.txt 20 byte block, 28 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c762e7160f38b4da56a784d90', pt = '3243f6a8885a308d313198a2e03707344a409382', ct = '601b5dcd1cf4ece954c740445340bf0afdc048df') RijndaelTestVec( i = 'dev_vec.txt 20 byte block, 32 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c762e7160f38b4da56a784d9045190cfe', pt = '3243f6a8885a308d313198a2e03707344a409382', ct = '579e930b36c1529aa3e86628bacfe146942882cf') RijndaelTestVec( i = 'dev_vec.txt 24 byte block, 16 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c', pt = '3243f6a8885a308d313198a2e03707344a4093822299f31d', ct = 'b24d275489e82bb8f7375e0d5fcdb1f481757c538b65148a') RijndaelTestVec( i = 'dev_vec.txt 24 byte block, 
20 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c762e7160', pt = '3243f6a8885a308d313198a2e03707344a4093822299f31d', ct = '738dae25620d3d3beff4a037a04290d73eb33521a63ea568') RijndaelTestVec( i = 'dev_vec.txt 24 byte block, 24 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c762e7160f38b4da5', pt = '3243f6a8885a308d313198a2e03707344a4093822299f31d', ct = '725ae43b5f3161de806a7c93e0bca93c967ec1ae1b71e1cf') RijndaelTestVec( i = 'dev_vec.txt 24 byte block, 28 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c762e7160f38b4da56a784d90', pt = '3243f6a8885a308d313198a2e03707344a4093822299f31d', ct = 'bbfc14180afbf6a36382a061843f0b63e769acdc98769130') RijndaelTestVec( i = 'dev_vec.txt 24 byte block, 32 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c762e7160f38b4da56a784d9045190cfe', pt = '3243f6a8885a308d313198a2e03707344a4093822299f31d', ct = '0ebacf199e3315c2e34b24fcc7c46ef4388aa475d66c194c') RijndaelTestVec( i = 'dev_vec.txt 28 byte block, 16 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c', pt = '3243f6a8885a308d313198a2e03707344a4093822299f31d0082efa9', ct = 'b0a8f78f6b3c66213f792ffd2a61631f79331407a5e5c8d3793aceb1') RijndaelTestVec( i = 'dev_vec.txt 28 byte block, 20 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c762e7160', pt = '3243f6a8885a308d313198a2e03707344a4093822299f31d0082efa9', ct = '08b99944edfce33a2acb131183ab0168446b2d15e958480010f545e3') RijndaelTestVec( i = 'dev_vec.txt 28 byte block, 24 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c762e7160f38b4da5', pt = '3243f6a8885a308d313198a2e03707344a4093822299f31d0082efa9', ct = 'be4c597d8f7efe22a2f7e5b1938e2564d452a5bfe72399c7af1101e2') RijndaelTestVec( i = 'dev_vec.txt 28 byte block, 28 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c762e7160f38b4da56a784d90', pt = '3243f6a8885a308d313198a2e03707344a4093822299f31d0082efa9', ct = 'ef529598ecbce297811b49bbed2c33bbe1241d6e1a833dbe119569e8') RijndaelTestVec( i = 'dev_vec.txt 28 byte block, 32 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c762e7160f38b4da56a784d9045190cfe', pt = '3243f6a8885a308d313198a2e03707344a4093822299f31d0082efa9', ct = '02fafc200176ed05deb8edb82a3555b0b10d47a388dfd59cab2f6c11') RijndaelTestVec( i = 'dev_vec.txt 32 byte block, 16 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c', pt = '3243f6a8885a308d313198a2e03707344a4093822299f31d0082efa98ec4e6c8', ct = '7d15479076b69a46ffb3b3beae97ad8313f622f67fedb487de9f06b9ed9c8f19') RijndaelTestVec( i = 'dev_vec.txt 32 byte block, 20 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c762e7160', pt = '3243f6a8885a308d313198a2e03707344a4093822299f31d0082efa98ec4e6c8', ct = '514f93fb296b5ad16aa7df8b577abcbd484decacccc7fb1f18dc567309ceeffd') RijndaelTestVec( i = 'dev_vec.txt 32 byte block, 24 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c762e7160f38b4da5', pt = '3243f6a8885a308d313198a2e03707344a4093822299f31d0082efa98ec4e6c8', ct = '5d7101727bb25781bf6715b0e6955282b9610e23a43c2eb062699f0ebf5887b2') RijndaelTestVec( i = 'dev_vec.txt 32 byte block, 28 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c762e7160f38b4da56a784d90', pt = '3243f6a8885a308d313198a2e03707344a4093822299f31d0082efa98ec4e6c8', ct = 'd56c5a63627432579e1dd308b2c8f157b40a4bfb56fea1377b25d3ed3d6dbf80') RijndaelTestVec( i = 'dev_vec.txt 32 byte block, 32 byte key', key = '2b7e151628aed2a6abf7158809cf4f3c762e7160f38b4da56a784d9045190cfe', pt = '3243f6a8885a308d313198a2e03707344a4093822299f31d0082efa98ec4e6c8', ct = 'a49406115dfb30a40418aafa4869b7c6a886ff31602a7dd19c889dc64f7e4e7a') # Make this test module runnable from the command prompt 
if __name__ == "__main__": unittest.main()<|fim▁end|>
RijndaelTestVec( i = 'dev_vec.txt 20 byte block, 16 byte key',
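
Each RijndaelTestVec call above is a known-answer test: the block size is taken from the ciphertext length, and the vector must round-trip in both directions. The same check in a generic form (cipher_factory is a hypothetical constructor standing in for Rijndael(key, keySize=..., blockSize=..., padding=noPadding())):

    from binascii import a2b_hex

    def check_vector(cipher_factory, key_hex, pt_hex, ct_hex):
        # A known-answer test must pass in both directions.
        key, pt, ct = a2b_hex(key_hex), a2b_hex(pt_hex), a2b_hex(ct_hex)
        alg = cipher_factory(key, block_size=len(ct))
        assert alg.encrypt(pt) == ct
        assert alg.decrypt(ct) == pt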
<|file_name|>basemodule.js<|end_file_name|><|fim▁begin|>// Copyright 2008 The Closure Library Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. <|fim▁hole|> * code to be modularized, giving the benefits of lazy loading and loading on * demand. * */ goog.provide('goog.module.BaseModule'); goog.require('goog.Disposable'); /** * A basic module object that represents a module of Javascript code that can * be dynamically loaded. * * @constructor * @extends {goog.Disposable} */ goog.module.BaseModule = function() { goog.Disposable.call(this); }; goog.inherits(goog.module.BaseModule, goog.Disposable); /** * Performs any load-time initialization that the module requires. * @param {Object} context The module context. */ goog.module.BaseModule.prototype.initialize = function(context) {};<|fim▁end|>
/**
 * @fileoverview Defines the base class for a module. This is used to allow the
<|file_name|>ShadowVolume.cpp<|end_file_name|><|fim▁begin|>#include <osgShadow/ShadowVolume>

#include <iostream>
#include <string>

#include <osg/Vec3>
#include <osg/Vec4>
#include <osg/io_utils>

#include <osgDB/Registry>
#include <osgDB/Input>
#include <osgDB/Output>
#include <osgDB/ParameterOutput>

bool ShadowVolume_readLocalData(osg::Object &obj, osgDB::Input &fr);
bool ShadowVolume_writeLocalData(const osg::Object &obj, osgDB::Output &fw);

osgDB::RegisterDotOsgWrapperProxy ShadowVolume_Proxy
(
    new osgShadow::ShadowVolume,
    "ShadowVolume",
    "Object ShadowTechnique ShadowVolume",
    ShadowVolume_readLocalData,
    ShadowVolume_writeLocalData
);

bool ShadowVolume_readLocalData(osg::Object& obj, osgDB::Input &fr)
{<|fim▁hole|>
    return itAdvanced;
}

bool ShadowVolume_writeLocalData(const osg::Object& obj, osgDB::Output& fw)
{
    const osgShadow::ShadowVolume& ss = static_cast<const osgShadow::ShadowVolume &>(obj);

    return true;
}<|fim▁end|>
    osgShadow::ShadowVolume& ss = static_cast<osgShadow::ShadowVolume&>(obj);
    bool itAdvanced = false;
<|file_name|>TodoListClient.java<|end_file_name|><|fim▁begin|>package com.myconnector.client.domain; import java.util.List; import com.google.gwt.user.client.rpc.IsSerializable; import com.myconnector.client.domain.interfaces.ITodoContext; import com.myconnector.client.domain.interfaces.ITodoItem; import com.myconnector.client.domain.interfaces.ITodoList; public class TodoListClient implements IsSerializable, ITodoList { private Long id; private String title; private boolean todoItemsLoaded = false; private List<ITodoItem> todoItems; private Integer position; private ITodoContext context; public TodoListClient() { } public TodoListClient(Long id, String title) { super(); this.id = id; this.title = title; } public Long getId() { return id; }<|fim▁hole|> public String getTitle() { return title; } public void setTitle(String title) { this.title = title; } public List<ITodoItem> getTodoItems() { return todoItems; } public void setTodoItems(List<ITodoItem> todoItems) { this.todoItems = todoItems; } public Integer getPosition() { return position; } public void setPosition(Integer position) { this.position = position; } public boolean isTodoItemsLoaded() { return todoItemsLoaded; } public void setTodoItemsLoaded(boolean todoItemsLoaded) { this.todoItemsLoaded = todoItemsLoaded; } public ITodoContext getContext() { return context; } public void setContext(ITodoContext context) { this.context = context; } }<|fim▁end|>
    public void setId(Long id) {
        this.id = id;
    }
<|file_name|>editor.py<|end_file_name|><|fim▁begin|>import wx import os.path class MainWindow( wx.Frame ): def __init__( self, filename = '*.txt' ): super( MainWindow, self ).__init__( None, size = ( 800,640 ) ) self.filename = filename self.dirname = '.' self.panel = wx.Panel( self, -1 ) self.CreateInteriorWindowComponents() sizer = wx.BoxSizer() sizer.Add( self.multiText, proportion = 1, flag = wx.CENTER|wx.EXPAND ) self.panel.SetSizer( sizer ) self.CreateExteriorWindowComponents() self.multiText.Bind( wx.EVT_KEY_UP, self.updateLineCol ) self.multiText.Bind( wx.EVT_LEFT_DOWN, self.updateLineCol ) def CreateInteriorWindowComponents( self ): self.multiText = wx.TextCtrl( self.panel, style = wx.TE_MULTILINE ) def updateLineCol( self, event ): l,c = self.multiText.PositionToXY( self.multiText.GetInsertionPoint() ) stat = "col=%s, row=%s" % ( l,c ) self.StatusBar.SetStatusText( stat, number = 0 ) event.Skip() def CreateExteriorWindowComponents( self ): self.CreateMenu() self.CreateStatusBar() self.SetTitle() def CreateMenu( self ): fileMenu = wx.Menu() for id, label, helpText, handler in \ [( wx.ID_OPEN, '&Open', 'Open a new file', self.OnOpen ), ( wx.ID_SAVE, '&Save', 'Save the current file', self.OnSave ), ( wx.ID_SAVEAS, 'Save &As', 'Save the file under a different name', self.OnSaveAs ), ( None, None, None, None ), ( wx.ID_EXIT, 'E&xit', 'Terminate the program', self.OnExit )]: if id == None: fileMenu.AppendSeparator() else: item = fileMenu.Append( id, label, helpText ) self.Bind( wx.EVT_MENU, handler, item ) editMenu = wx.Menu() for id, label, helpText, handler in \ [( wx.ID_COPY, '&Copy', 'Copy selected text', self.OnCopy ), ( wx.ID_PASTE, '&Paste', 'Paste clipboard text', self.OnPaste )]: if id == None: editMenu.AppendSeparator() else: item = editMenu.Append( id, label, helpText ) self.Bind( wx.EVT_MENU, handler, item ) aboutMenu = wx.Menu() for id, label, helpText, handler in \ [( wx.ID_ABOUT, '&About', 'Information about this program', self.OnAbout )]: if id == None: aboutMenu.AppendSeparator() else: item = aboutMenu.Append( id, label, helpText ) self.Bind( wx.EVT_MENU, handler, item ) menuBar = wx.MenuBar() menuBar.Append( fileMenu, '&File' ) # Add the fileMenu to the MenuBar menuBar.Append( editMenu, '&Edit' ) menuBar.Append( aboutMenu, '&About' ) self.SetMenuBar( menuBar ) # Add the menuBar to the Frame def SetTitle( self ): super( MainWindow, self ).SetTitle( 'ATE %s'%self.filename ) # helper methods def defaultFileDialogOptions( self ): return dict( message = 'Choose a file', defaultDir = self.dirname, wildcard = '*.*' ) def askUserForFilename (self, **dialogOptions ): dialog = wx.FileDialog( self, **dialogOptions ) if dialog.ShowModal() == wx.ID_OK: userProvidedFilename = True self.filename = dialog.GetFilename() self.dirname = dialog.GetDirectory()<|fim▁hole|> dialog.Destroy() return userProvidedFilename # event handlers def OnAbout( self, event ): dialog = wx.MessageDialog( self, 'A sample editor\n' 'in wxPython', 'About Sample Editor', wx.OK ) dialog.ShowModal() dialog.Destroy() def OnExit( self, event ): self.Close() def OnSave( self, event ): if os.path.exists( self.filename ): self.OnSaveFile( event ) else: self.OnSaveAs( event ) def OnOpen( self, event ): if self.askUserForFilename( style = wx.OPEN, **self.defaultFileDialogOptions() ): textfile = open( os.path.join( self.dirname, self.filename ), 'r' ) self.multiText.SetValue( textfile.read() ) textfile.close() def OnSaveFile( self, event ): textfile = open( os.path.join( self.dirname, self.filename ), 'w' ) 
textfile.write( self.multiText.GetValue() ) textfile.close() def OnSaveAs( self, event ): if self.askUserForFilename( defaultFile = self.filename, style = wx.SAVE, **self.defaultFileDialogOptions() ): self.OnSaveFile( event ) # clipboard functions, flush for other programs def OnCopy( self, event ): self.dataObj = wx.TextDataObject() self.dataObj.SetText( self.multiText.GetStringSelection() ) if wx.TheClipboard.Open(): wx.TheClipboard.SetData( self.dataObj ) wx.TheClipboard.Flush() else: wx.MessageBox( "Unable to open the clipboard", "Error" ) def OnPaste( self, event ): if wx.TheClipboard.Open(): dataObj = wx.TextDataObject() success = wx.TheClipboard.GetData( dataObj ) wx.TheClipboard.Flush() wx.TheClipboard.Close() if not success: return text = dataObj.GetText() if text: self.multiText.WriteText( text ) app = wx.App() frame = MainWindow() frame.Show() app.MainLoop()<|fim▁end|>
            self.SetTitle()
        else:
            userProvidedFilename = False
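
OnCopy in the editor.py record above opens the clipboard and calls Flush() so the copied text outlives the application, but it never calls Close(); pairing Open with Close is the usual discipline. A sketch of the same copy helper with the close added (wx API as used in the sample):

    import wx

    def copy_to_clipboard(text):
        # Flush() keeps the data available to other apps after we close
        # the clipboard; Close() releases it for other windows.
        data = wx.TextDataObject()
        data.SetText(text)
        if wx.TheClipboard.Open():
            wx.TheClipboard.SetData(data)
            wx.TheClipboard.Flush()
            wx.TheClipboard.Close()
        else:
            wx.MessageBox("Unable to open the clipboard", "Error")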
<|file_name|>rpcmanager.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- """ Manager of RPC calls from plugins. """ from golismero.api.config import Config __license__ = """ GoLismero 2.0 - The web knife - Copyright (C) 2011-2014 Golismero project site: https://github.com/golismero Golismero project mail: [email protected] This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """ __all__ = ["RPCManager"] from ..common import pickle from ..messaging.codes import MessageCode, MSG_RPC_CODES from ..messaging.manager import MessageManager from functools import partial from threading import Thread import sys import traceback #------------------------------------------------------------------------------ # Decorators to automatically register RPC implementors at import time. # Global map of RPC codes to implementors. # dict( int -> tuple(callable, bool) ) rpcMap = {} def implementor(rpc_code, blocking=False): """ RPC implementation function. """ return partial(_add_implementor, rpc_code, blocking) def _add_implementor(rpc_code, blocking, fn): # Validate the argument types. if type(rpc_code) is not int: raise TypeError("Expected int, got %r instead" % type(rpc_code)) if type(blocking) is not bool: raise TypeError("Expected bool, got %r instead" % type(blocking)) if not callable(fn): raise TypeError("Expected callable, got %r instead" % type(fn)) # Validate the RPC code. if rpc_code in rpcMap: try: msg = "Duplicated RPC implementors for code %d: %s and %s" msg %= (rpc_code, rpcMap[rpc_code][0].__name__, fn.__name__) except Exception: msg = "Duplicated RPC implementors for code: %d" % rpc_code raise SyntaxError(msg) # TODO: use introspection to validate the function signature # Register the implementor. rpcMap[rpc_code] = (fn, blocking) # Return the implementor. No wrapping is needed! :) return fn #------------------------------------------------------------------------------ # Implementor for the special MSG_RPC_BULK code for bulk RPC calls. @implementor(MessageCode.MSG_RPC_BULK) def rpc_bulk(orchestrator, audit_name, rpc_code, *arguments): # Get the implementor for the RPC code. # Raise NotImplementedError if it's not defined. try: method, blocking = rpcMap[rpc_code] except KeyError: raise NotImplementedError("RPC code not implemented: %r" % rpc_code) # This can't be done with blocking implementors! if blocking: raise NotImplementedError( "Cannot run blocking RPC calls in bulk. Code: %r" % rpc_code) # Prepare a partial function call to the implementor. caller = partial(method, orchestrator, audit_name) # Use the built-in map() function to issue all the calls. # This ensures we support the exact same interface and functionality. return map(caller, *arguments) #------------------------------------------------------------------------------ # Ensures the message is received by the Orchestrator. 
@implementor(MessageCode.MSG_RPC_SEND_MESSAGE) def rpc_send_message(orchestrator, audit_name, message): # Enqueue the ACK message. orchestrator.enqueue_msg(message) #------------------------------------------------------------------------------ class RPCManager (object): """ Executes remote procedure calls from plugins. """ #-------------------------------------------------------------------------- def __init__(self, orchestrator): """ :param orchestrator: Orchestrator instance. :type orchestrator: Orchestrator """ # Keep a reference to the Orchestrator. self.__orchestrator = orchestrator # Keep a reference to the global RPC map (it's faster this way). self.__rpcMap = rpcMap # Check all RPC messages have been mapped at this point. missing = MSG_RPC_CODES.difference(self.__rpcMap.keys()) if missing: msg = "Missing RPC implementors for codes: %s" msg %= ", ".join(str(x) for x in sorted(missing)) raise SyntaxError(msg) #-------------------------------------------------------------------------- @property def orchestrator(self): """ :returns: Orchestrator instance. :rtype: Orchestrator """ return self.__orchestrator #-------------------------------------------------------------------------- def execute_rpc(self, audit_name, rpc_code, response_queue, args, kwargs): """ Honor a remote procedure call request from a plugin. :param audit_name: Name of the audit requesting the call. :type audit_name: str :param rpc_code: RPC code. :type rpc_code: int :param response_queue: Response queue identity. :type response_queue: str :param args: Positional arguments to the call. :type args: tuple :param kwargs: Keyword arguments to the call. :type kwargs: dict """ try: # Get the implementor for the RPC code. # Raise NotImplementedError if it's not defined. try: target, blocking = self.__rpcMap[rpc_code] except KeyError: raise NotImplementedError( "RPC code not implemented: %r" % rpc_code) # If it's a blocking call... if blocking: # Run the implementor in a new thread. thread = Thread( target = self._execute_rpc_implementor_background, args = ( Config._context, audit_name, target, response_queue, args, kwargs), ) thread.daemon = True thread.start() # If it's a non-blocking call... else: # Call the implementor directly. self.execute_rpc_implementor( audit_name, target, response_queue, args, kwargs) # Catch exceptions and send them back. except Exception: if response_queue: error = self.prepare_exception(*sys.exc_info()) try: self.orchestrator.messageManager.send( response_queue, (False, error)) except IOError: import warnings warnings.warn("RPC caller died!") pass #-------------------------------------------------------------------------- def _execute_rpc_implementor_background(self, context, audit_name, target, response_queue, args, kwargs): """ Honor a remote procedure call request from a plugin, from a background thread. Must only be used as the entry point for said background thread! :param context: Plugin execution context. :type context: PluginContext :param audit_name: Name of the audit requesting the call. :type audit_name: str :param target: RPC implementor function. :type target: callable :param response_queue: Response queue identity. :type response_queue: str :param args: Positional arguments to the call. :type args: tuple :param kwargs: Keyword arguments to the call. 
:type kwargs: dict
        """
        Config._context = context
        self.execute_rpc_implementor(
            audit_name, target, response_queue, args, kwargs)


    #--------------------------------------------------------------------------
    def execute_rpc_implementor(self, audit_name, target, response_queue,
                                args, kwargs):
        """
        Honor a remote procedure call request from a plugin.

        :param audit_name: Name of the audit requesting the call.
        :type audit_name: str

        :param target: RPC implementor function.
        :type target: callable

        :param response_queue: Response queue identity.
        :type response_queue: str

        :param args: Positional arguments to the call.
        :type args: tuple

        :param kwargs: Keyword arguments to the call.
        :type kwargs: dict
        """
        try:

            # Call the implementor and get the response.
            response = target(self.orchestrator, audit_name, *args, **kwargs)
            success = True

        # Catch exceptions and prepare them for sending.
        except Exception:
            if response_queue:
                response = self.prepare_exception(*sys.exc_info())
            success = False

        # If the call was synchronous,
        # send the response/error back to the plugin.
        if response_queue:
            self.orchestrator.messageManager.send(
                response_queue, (success, response))


    #--------------------------------------------------------------------------
    @staticmethod
    def prepare_exception(exc_type, exc_value, exc_traceback):
        """
        Prepare an exception for sending back to the plugins.

        :param exc_type: Exception type.
        :type exc_type: class

        :param exc_value: Exception value.
        :type exc_value: object

        :param exc_traceback: Exception traceback.
        :type exc_traceback: traceback

        :returns: Exception type, exception value and formatted traceback.
            The exception value may be formatted too and the exception type
            replaced by Exception if it's not possible to serialize it for
            sending.<|fim▁hole|>
        try:
            pickle.dumps(exc_value, -1)
        except Exception:
            exc_value = traceback.format_exception_only(exc_type, exc_value)
        try:
            pickle.dumps(exc_type, -1)
        except Exception:
            exc_type = Exception
        exc_traceback = traceback.extract_tb(exc_traceback)
        return exc_type, exc_value, exc_traceback<|fim▁end|>
:rtype: tuple(class, object, str) """ exc_type, exc_value, exc_traceback = sys.exc_info()
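The rpcmanager.py sample above is built around a decorator-factory registry: implementor(rpc_code, blocking) returns a partial application of _add_implementor, which validates its arguments, stores the function in the module-level rpcMap, and hands the function back unwrapped, so dispatch later is a plain dictionary lookup. A stripped-down sketch of the same pattern (illustrative names only, not GoLismero's actual API):

from functools import partial

RPC_MAP = {}  # rpc code -> (implementor function, blocking flag)

def implementor(rpc_code, blocking=False):
    """Decorator factory: register a function under an RPC code."""
    return partial(_register, rpc_code, blocking)

def _register(rpc_code, blocking, fn):
    if rpc_code in RPC_MAP:
        raise SyntaxError("Duplicated RPC implementor for code: %d" % rpc_code)
    RPC_MAP[rpc_code] = (fn, blocking)
    return fn  # no wrapping needed, the original function is returned

@implementor(7)
def echo(orchestrator, audit_name, payload):
    return (audit_name, payload)

# Dispatch is then a plain dictionary lookup:
fn, blocking = RPC_MAP[7]
assert fn(None, "demo_audit", 123) == ("demo_audit", 123)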
<|file_name|>body4.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python from body3 import * function_decl(link='extern',srcp='eval.c:216', body=bind_expr( body=statement_list( E0=decl_expr( ftype=void_type(algn='8',name='126')), E1=decl_expr( ftype=void_type(algn='8',name='126')), E2=modify_expr( OP0=var_decl(algn='32',srcp='eval.c:53',used='1', name=identifier_node(string='need_here_doc')), OP1=integer_cst(low='0', ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5'))), E3=call_expr( fn=addr_expr( OP0=pointer_type(algn='64')), ftype=void_type(algn='8',name='126')), E4=cond_expr( OP0=truth_andif_expr( OP0=ne_expr( OP0=var_decl(algn='32',srcp='shell.h:94',used='1', name=identifier_node(string='interactive')), OP1=integer_cst(low='0', ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5'))), OP1=ne_expr( OP0=nop_expr( OP0=component_ref( OP0=var_decl(algn='64',srcp='input.h:89',used='1', name=identifier_node(string='bash_input')), OP1=field_decl(algn='32',srcp='input.h:82', name=identifier_node(string='type'))), ftype=integer_type(algn='32',max='29',min='28',name='17',prec='32',sign='unsigned',size='5')), OP1=integer_cst(low='3', ftype=integer_type(algn='32',max='29',min='28',name='17',prec='32',sign='unsigned',size='5')))), OP1=statement_list( E0=modify_expr( OP0=var_decl(algn='64',srcp='eval.c:219',used='1', name=identifier_node(string='command_to_execute')), OP1=call_expr( E0=nop_expr( OP0=addr_expr( OP0=pointer_type(algn='64'), ftype=string_cst(string='PROMPT_COMMAND', ftype=array_type(algn='8',domn='13067',elts='9',size='13066'))), ftype=pointer_type(algn='64',ptd='906',size='22')), fn=addr_expr( OP0=pointer_type(algn='64')), ftype=pointer_type(algn='64',ptd='9',size='22')), ftype=pointer_type(algn='64',ptd='9',size='22')), E1=cond_expr( OP0=ne_expr( OP0=var_decl(algn='64',srcp='eval.c:219',used='1', name=identifier_node(string='command_to_execute')), OP1=integer_cst(low='0', ftype=pointer_type(algn='64',ptd='9',size='22')), ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5')), OP1=call_expr( E0=var_decl(algn='64',srcp='eval.c:219',used='1', name=identifier_node(string='command_to_execute')), E1=nop_expr( OP0=addr_expr( OP0=pointer_type(algn='64'), ftype=string_cst(string='PROMPT_COMMAND', ftype=array_type(algn='8',domn='13067',elts='9',size='13066'))), ftype=pointer_type(algn='64',ptd='9',size='22')), fn=addr_expr( OP0=pointer_type(algn='64'), ftype=function_decl(body='undefined',ftype='10721',link='extern',name='10720',srcp='input.h:105')), ftype=void_type(algn='8',name='126')), ftype=void_type(algn='8',name='126')), E2=cond_expr( OP0=eq_expr( OP0=var_decl(algn='32',srcp='eval.c:51',used='1', name=identifier_node(string='running_under_emacs')), OP1=integer_cst(low='2', ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5'))), OP1=call_expr( fn=addr_expr( OP0=pointer_type(algn='64')), ftype=void_type(algn='8',name='126')), ftype=void_type(algn='8',name='126'))), ftype=void_type(algn='8',name='126')), E5=modify_expr( OP0=var_decl(algn='32',srcp='eval.c:54',used='1', name=identifier_node(string='current_command_line_count')), OP1=integer_cst(low='0', ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5'))), E6=modify_expr( OP0=var_decl(algn='32',srcp='eval.c:218',used='1', name=identifier_node(string='r')), OP1=call_expr( fn=addr_expr( OP0=pointer_type(algn='64'), 
ftype=function_decl(body='undefined',ftype='2560',link='extern',name='12695',srcp='externs.h:104')), ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5')), ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5')), E7=cond_expr( OP0=ne_expr( OP0=var_decl(algn='32',srcp='eval.c:53',used='1', name=identifier_node(string='need_here_doc')), OP1=integer_cst(low='0', ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5'))), OP1=call_expr( fn=addr_expr(<|fim▁hole|> ftype=function_decl(body='undefined',ftype='5191',link='extern',name='10700',srcp='input.h:104')), ftype=void_type(algn='8',name='126')), ftype=void_type(algn='8',name='126')), E8=return_expr( expr=modify_expr( OP0=result_decl(algn='32',note='art:artificial',srcp='eval.c:216'), OP1=var_decl(algn='32',srcp='eval.c:218',used='1', name=identifier_node(string='r')), ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5')), ftype=void_type(algn='8',name='126'))), ftype=void_type(algn='8',name='126'), vars=var_decl(algn='32',srcp='eval.c:218',used='1', name=identifier_node(string='r'))), name=identifier_node(string='parse_command'))<|fim▁end|>
OP0=pointer_type(algn='64'),
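body4.py above is a GCC GENERIC tree dump re-serialized as nested keyword-argument constructor calls, with the node factories imported from body3. Nothing in the dump requires those factories to be more than functions that accept arbitrary keyword fields; one plausible minimal scheme, purely an assumption since body3.py itself is not shown, is:

class Node(object):
    """Generic tree node: remembers its kind and its raw keyword fields."""
    def __init__(self, kind, **fields):
        self.kind = kind
        self.fields = fields

    def __repr__(self):
        return "%s(%s)" % (self.kind, ", ".join(sorted(self.fields)))

def _factory(kind):
    def make(**fields):
        return Node(kind, **fields)
    return make

# One factory per tree code appearing in the dump.
for _kind in ("function_decl", "bind_expr", "statement_list", "decl_expr",
              "modify_expr", "call_expr", "cond_expr", "addr_expr",
              "var_decl", "identifier_node", "integer_cst", "void_type",
              "integer_type", "pointer_type", "return_expr", "result_decl"):
    globals()[_kind] = _factory(_kind)

print(function_decl(link='extern',
                    name=identifier_node(string='parse_command')))
# -> function_decl(link, name)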
<|file_name|>query.cc<|end_file_name|><|fim▁begin|>/** * @file query.cc * * @section LICENSE * * The MIT License * * @copyright Copyright (c) 2017-2021 TileDB, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * * @section DESCRIPTION * * This file implements class Query. */ #include "tiledb/sm/query/query.h" #include "tiledb/common/heap_memory.h" #include "tiledb/common/logger.h" #include "tiledb/common/memory.h" #include "tiledb/sm/array/array.h" #include "tiledb/sm/enums/query_status.h" #include "tiledb/sm/enums/query_type.h" #include "tiledb/sm/fragment/fragment_metadata.h" #include "tiledb/sm/misc/parse_argument.h" #include "tiledb/sm/query/dense_reader.h" #include "tiledb/sm/query/global_order_writer.h" #include "tiledb/sm/query/ordered_writer.h" #include "tiledb/sm/query/query_condition.h" #include "tiledb/sm/query/reader.h" #include "tiledb/sm/query/sparse_global_order_reader.h" #include "tiledb/sm/query/sparse_unordered_with_dups_reader.h" #include "tiledb/sm/query/unordered_writer.h" #include "tiledb/sm/rest/rest_client.h" #include "tiledb/sm/storage_manager/storage_manager.h" #include "tiledb/sm/tile/writer_tile.h" #include <cassert> #include <iostream> #include <sstream> using namespace tiledb::common; using namespace tiledb::sm::stats; namespace tiledb { namespace sm { /* ****************************** */ /* CONSTRUCTORS & DESTRUCTORS */ /* ****************************** */ Query::Query(StorageManager* storage_manager, Array* array, URI fragment_uri) : array_(array) , array_schema_(array->array_schema_latest_ptr()) , layout_(Layout::ROW_MAJOR) , storage_manager_(storage_manager) , stats_(storage_manager_->stats()->create_child("Query")) , logger_(storage_manager->logger()->clone("Query", ++logger_id_)) , has_coords_buffer_(false) , has_zipped_coords_buffer_(false) , coord_buffer_is_set_(false) , coord_data_buffer_is_set_(false) , coord_offsets_buffer_is_set_(false) , data_buffer_name_("") , offsets_buffer_name_("") , disable_check_global_order_(false) , fragment_uri_(fragment_uri) { assert(array->is_open()); auto st = array->get_query_type(&type_); assert(st.ok()); if (type_ == QueryType::WRITE) { subarray_ = Subarray(array, stats_, logger_); } else { subarray_ = Subarray(array, Layout::ROW_MAJOR, stats_, logger_); } fragment_metadata_ = array->fragment_metadata(); coords_info_.coords_buffer_ = nullptr; coords_info_.coords_buffer_size_ = nullptr; coords_info_.coords_num_ = 0; coords_info_.has_coords_ = false; callback_ = nullptr; callback_data_ = nullptr; status_ = 
QueryStatus::UNINITIALIZED;
  if (storage_manager != nullptr)
    config_ = storage_manager->config();

  // Set initial subarray configuration
  subarray_.set_config(config_);

  rest_scratch_ = make_shared<Buffer>(HERE());
}

Query::~Query() {
  bool found = false;
  bool use_malloc_trim = false;
  const Status& st =
      config_.get<bool>("sm.mem.malloc_trim", &use_malloc_trim, &found);
  if (st.ok() && found && use_malloc_trim) {
    tdb_malloc_trim();
  }
}

/* ****************************** */
/*               API              */
/* ****************************** */

Status Query::add_range(
    unsigned dim_idx, const void* start, const void* end, const void* stride) {
  if (dim_idx >= array_schema_->dim_num())
    return logger_->status(
        Status_QueryError("Cannot add range; Invalid dimension index"));

  if (start == nullptr || end == nullptr)
    return logger_->status(
        Status_QueryError("Cannot add range; Invalid range"));
<|fim▁hole|>
  if (array_schema_->domain()->dimension(dim_idx)->var_size())
    return logger_->status(
        Status_QueryError("Cannot add range; Range must be fixed-sized"));

  // Prepare a temp range
  std::vector<uint8_t> range;
  auto coord_size = array_schema_->dimension(dim_idx)->coord_size();
  range.resize(2 * coord_size);
  std::memcpy(&range[0], start, coord_size);
  std::memcpy(&range[coord_size], end, coord_size);

  bool read_range_oob_error = true;
  if (type_ == QueryType::READ) {
    // Get read_range_oob config setting
    bool found = false;
    std::string read_range_oob = config_.get("sm.read_range_oob", &found);
    assert(found);
    if (read_range_oob != "error" && read_range_oob != "warn")
      return logger_->status(Status_QueryError(
          "Invalid value " + read_range_oob +
          " for sm.read_range_oob. Acceptable values are 'error' or 'warn'."));

    read_range_oob_error = read_range_oob == "error";
  } else {
    if (!array_schema_->dense())
      return logger_->status(
          Status_QueryError("Adding a subarray range to a write query is not "
                            "supported in sparse arrays"));

    if (subarray_.is_set(dim_idx))
      return logger_->status(
          Status_QueryError("Cannot add range; Multi-range dense writes "
                            "are not supported"));
  }

  // Add range
  Range r(&range[0], 2 * coord_size);
  return subarray_.add_range(dim_idx, std::move(r), read_range_oob_error);
}

Status Query::add_range_var(
    unsigned dim_idx,
    const void* start,
    uint64_t start_size,
    const void* end,
    uint64_t end_size) {
  if (dim_idx >= array_schema_->dim_num())
    return logger_->status(
        Status_QueryError("Cannot add range; Invalid dimension index"));

  if ((start == nullptr && start_size != 0) ||
      (end == nullptr && end_size != 0))
    return logger_->status(
        Status_QueryError("Cannot add range; Invalid range"));

  if (!array_schema_->domain()->dimension(dim_idx)->var_size())
    return logger_->status(
        Status_QueryError("Cannot add range; Range must be variable-sized"));

  if (type_ == QueryType::WRITE)
    return logger_->status(Status_QueryError(
        "Cannot add range; Function applicable only to reads"));

  // Get read_range_oob config setting
  bool found = false;
  std::string read_range_oob = config_.get("sm.read_range_oob", &found);
  assert(found);
  if (read_range_oob != "error" && read_range_oob != "warn")
    return logger_->status(Status_QueryError(
        "Invalid value " + read_range_oob +
        " for sm.read_range_oob. Acceptable values are 'error' or 'warn'."));

  // Add range
  Range r;
  r.set_range_var(start, start_size, end, end_size);
  return subarray_.add_range(dim_idx, std::move(r), read_range_oob == "error");
}

Status Query::get_range_num(unsigned dim_idx, uint64_t* range_num) const {
  if (type_ == QueryType::WRITE && !array_schema_->dense())
    return logger_->status(
        Status_QueryError("Getting the number of ranges from a write query "
                          "is not applicable to sparse arrays"));

  return subarray_.get_range_num(dim_idx, range_num);
}

Status Query::get_range(
    unsigned dim_idx,
    uint64_t range_idx,
    const void** start,
    const void** end,
    const void** stride) const {
  if (type_ == QueryType::WRITE && !array_schema_->dense())
    return logger_->status(
        Status_QueryError("Getting a range from a write query is not "
                          "applicable to sparse arrays"));

  *stride = nullptr;
  return subarray_.get_range(dim_idx, range_idx, start, end);
}

Status Query::get_range_var_size(
    unsigned dim_idx,
    uint64_t range_idx,
    uint64_t* start_size,
    uint64_t* end_size) const {
  if (type_ == QueryType::WRITE)
    return logger_->status(Status_QueryError(
        "Getting a var range size from a write query is not applicable"));

  return subarray_.get_range_var_size(
      dim_idx, range_idx, start_size, end_size);
}

Status Query::get_range_var(
    unsigned dim_idx, uint64_t range_idx, void* start, void* end) const {
  if (type_ == QueryType::WRITE)
    return logger_->status(Status_QueryError(
        "Getting a var range from a write query is not applicable"));

  uint64_t start_size = 0;
  uint64_t end_size = 0;
  subarray_.get_range_var_size(dim_idx, range_idx, &start_size, &end_size);

  const void* range_start;
  const void* range_end;
  const void* stride;
  RETURN_NOT_OK(
      get_range(dim_idx, range_idx, &range_start, &range_end, &stride));

  std::memcpy(start, range_start, start_size);
  std::memcpy(end, range_end, end_size);

  return Status::Ok();
}

Status Query::add_range_by_name(
    const std::string& dim_name,
    const void* start,
    const void* end,
    const void* stride) {
  unsigned dim_idx;
  RETURN_NOT_OK(
      array_schema_->domain()->get_dimension_index(dim_name, &dim_idx));

  return add_range(dim_idx, start, end, stride);
}

Status Query::add_range_var_by_name(
    const std::string& dim_name,
    const void* start,
    uint64_t start_size,
    const void* end,
    uint64_t end_size) {
  unsigned dim_idx;
  RETURN_NOT_OK(
      array_schema_->domain()->get_dimension_index(dim_name, &dim_idx));

  return add_range_var(dim_idx, start, start_size, end, end_size);
}

Status Query::get_range_num_from_name(
    const std::string& dim_name, uint64_t* range_num) const {
  unsigned dim_idx;
  RETURN_NOT_OK(
      array_schema_->domain()->get_dimension_index(dim_name, &dim_idx));

  return get_range_num(dim_idx, range_num);
}

Status Query::get_range_from_name(
    const std::string& dim_name,
    uint64_t range_idx,
    const void** start,
    const void** end,
    const void** stride) const {
  unsigned dim_idx;
  RETURN_NOT_OK(
      array_schema_->domain()->get_dimension_index(dim_name, &dim_idx));

  return get_range(dim_idx, range_idx, start, end, stride);
}

Status Query::get_range_var_size_from_name(
    const std::string& dim_name,
    uint64_t range_idx,
    uint64_t* start_size,
    uint64_t* end_size) const {
  unsigned dim_idx;
  RETURN_NOT_OK(
      array_schema_->domain()->get_dimension_index(dim_name, &dim_idx));

  return get_range_var_size(dim_idx, range_idx, start_size, end_size);
}

Status Query::get_range_var_from_name(
    const std::string& dim_name,
    uint64_t range_idx,
    void* start,
    void* end) const {
  unsigned dim_idx;
  RETURN_NOT_OK(
      array_schema_->domain()->get_dimension_index(dim_name, &dim_idx));

  return
get_range_var(dim_idx, range_idx, start, end);
}

Status Query::get_est_result_size(const char* name, uint64_t* size) {
  if (type_ == QueryType::WRITE)
    return logger_->status(Status_QueryError(
        "Cannot get estimated result size; Operation currently "
        "unsupported for write queries"));

  if (name == nullptr)
    return logger_->status(Status_QueryError(
        "Cannot get estimated result size; Name cannot be null"));

  if (name == constants::coords &&
      !array_schema_->domain()->all_dims_same_type())
    return logger_->status(Status_QueryError(
        "Cannot get estimated result size; Not applicable to zipped "
        "coordinates in arrays with heterogeneous domain"));

  if (name == constants::coords && !array_schema_->domain()->all_dims_fixed())
    return logger_->status(Status_QueryError(
        "Cannot get estimated result size; Not applicable to zipped "
        "coordinates in arrays with domains with variable-sized dimensions"));

  if (array_schema_->is_nullable(name))
    return logger_->status(Status_QueryError(
        std::string(
            "Cannot get estimated result size; Input attribute/dimension '") +
        name + "' is nullable"));

  if (array_->is_remote() && !subarray_.est_result_size_computed()) {
    auto rest_client = storage_manager_->rest_client();
    if (rest_client == nullptr)
      return logger_->status(
          Status_QueryError("Error in query estimate result size; remote "
                            "array with no rest client."));

    RETURN_NOT_OK(
        rest_client->get_query_est_result_sizes(array_->array_uri(), this));
  }

  return subarray_.get_est_result_size_internal(
      name, size, &config_, storage_manager_->compute_tp());
}

Status Query::get_est_result_size(
    const char* name, uint64_t* size_off, uint64_t* size_val) {
  if (type_ == QueryType::WRITE)
    return logger_->status(Status_QueryError(
        "Cannot get estimated result size; Operation currently "
        "unsupported for write queries"));

  if (array_schema_->is_nullable(name))
    return logger_->status(Status_QueryError(
        std::string(
            "Cannot get estimated result size; Input attribute/dimension '") +
        name + "' is nullable"));

  if (array_->is_remote() && !subarray_.est_result_size_computed()) {
    auto rest_client = storage_manager_->rest_client();
    if (rest_client == nullptr)
      return logger_->status(
          Status_QueryError("Error in query estimate result size; remote "
                            "array with no rest client."));

    RETURN_NOT_OK(
        rest_client->get_query_est_result_sizes(array_->array_uri(), this));
  }

  return subarray_.get_est_result_size(
      name, size_off, size_val, &config_, storage_manager_->compute_tp());
}

Status Query::get_est_result_size_nullable(
    const char* name, uint64_t* size_val, uint64_t* size_validity) {
  if (type_ == QueryType::WRITE)
    return logger_->status(Status_QueryError(
        "Cannot get estimated result size; Operation currently "
        "unsupported for write queries"));

  if (name == nullptr)
    return logger_->status(Status_QueryError(
        "Cannot get estimated result size; Name cannot be null"));

  if (!array_schema_->attribute(name))
    return logger_->status(Status_QueryError(
        "Cannot get estimated result size; Nullable API is only "
        "applicable to attributes"));

  if (!array_schema_->is_nullable(name))
    return logger_->status(Status_QueryError(
        std::string("Cannot get estimated result size; Input attribute '") +
        name + "' is not nullable"));

  if (array_->is_remote() && !subarray_.est_result_size_computed()) {
    auto rest_client = storage_manager_->rest_client();
    if (rest_client == nullptr)
      return logger_->status(
          Status_QueryError("Error in query estimate result size; remote "
                            "array with no rest client."));

    return logger_->status(
        Status_QueryError("Error in query estimate result size; unimplemented "
                          "for nullable attributes in remote arrays."));
  }

  return subarray_.get_est_result_size_nullable(
      name, size_val, size_validity, &config_, storage_manager_->compute_tp());
}

Status Query::get_est_result_size_nullable(
    const char* name,
    uint64_t* size_off,
    uint64_t* size_val,
    uint64_t* size_validity) {
  if (type_ == QueryType::WRITE)
    return logger_->status(Status_QueryError(
        "Cannot get estimated result size; Operation currently "
        "unsupported for write queries"));

  if (!array_schema_->attribute(name))
    return logger_->status(Status_QueryError(
        "Cannot get estimated result size; Nullable API is only "
        "applicable to attributes"));

  if (!array_schema_->is_nullable(name))
    return logger_->status(Status_QueryError(
        std::string("Cannot get estimated result size; Input attribute '") +
        name + "' is not nullable"));

  if (array_->is_remote() && !subarray_.est_result_size_computed()) {
    auto rest_client = storage_manager_->rest_client();
    if (rest_client == nullptr)
      return logger_->status(
          Status_QueryError("Error in query estimate result size; remote "
                            "array with no rest client."));

    return logger_->status(
        Status_QueryError("Error in query estimate result size; unimplemented "
                          "for nullable attributes in remote arrays."));
  }

  return subarray_.get_est_result_size_nullable(
      name,
      size_off,
      size_val,
      size_validity,
      &config_,
      storage_manager_->compute_tp());
}

std::unordered_map<std::string, Subarray::ResultSize>
Query::get_est_result_size_map() {
  return subarray_.get_est_result_size_map(
      &config_, storage_manager_->compute_tp());
}

std::unordered_map<std::string, Subarray::MemorySize>
Query::get_max_mem_size_map() {
  return subarray_.get_max_mem_size_map(
      &config_, storage_manager_->compute_tp());
}

Status Query::get_written_fragment_num(uint32_t* num) const {
  if (type_ != QueryType::WRITE)
    return logger_->status(Status_QueryError(
        "Cannot get number of fragments; Applicable only to WRITE mode"));

  *num = (uint32_t)written_fragment_info_.size();

  return Status::Ok();
}

Status Query::get_written_fragment_uri(uint32_t idx, const char** uri) const {
  if (type_ != QueryType::WRITE)
    return logger_->status(Status_QueryError(
        "Cannot get fragment URI; Applicable only to WRITE mode"));

  auto num = (uint32_t)written_fragment_info_.size();
  if (idx >= num)
    return logger_->status(
        Status_QueryError("Cannot get fragment URI; Invalid fragment index"));

  *uri = written_fragment_info_[idx].uri_.c_str();

  return Status::Ok();
}

Status Query::get_written_fragment_timestamp_range(
    uint32_t idx, uint64_t* t1, uint64_t* t2) const {
  if (type_ != QueryType::WRITE)
    return logger_->status(Status_QueryError(
        "Cannot get fragment timestamp range; Applicable only to WRITE mode"));

  auto num = (uint32_t)written_fragment_info_.size();
  if (idx >= num)
    return logger_->status(Status_QueryError(
        "Cannot get fragment timestamp range; Invalid fragment index"));

  *t1 = written_fragment_info_[idx].timestamp_range_.first;
  *t2 = written_fragment_info_[idx].timestamp_range_.second;

  return Status::Ok();
}

const Array* Query::array() const {
  return array_;
}

Array* Query::array() {
  return array_;
}

const ArraySchema& Query::array_schema() const {
  return *(array_schema_.get());
}

std::vector<std::string> Query::buffer_names() const {
  std::vector<std::string> ret;

  // Add to the buffer names the attributes, as well as the dimensions only if
  // coords_buffer_ has not been set
  for (const auto& it : buffers_) {
    if (!array_schema_->is_dim(it.first) || (!coords_info_.coords_buffer_))
      ret.push_back(it.first);
  }

  // Special zipped coordinates name
  if
(coords_info_.coords_buffer_) ret.push_back(constants::coords); return ret; } QueryBuffer Query::buffer(const std::string& name) const { // Special zipped coordinates if (type_ == QueryType::WRITE && name == constants::coords) return QueryBuffer( coords_info_.coords_buffer_, nullptr, coords_info_.coords_buffer_size_, nullptr); // Attribute or dimension auto buf = buffers_.find(name); if (buf != buffers_.end()) return buf->second; // Named buffer does not exist return QueryBuffer{}; } Status Query::finalize() { if (status_ == QueryStatus::UNINITIALIZED) return Status::Ok(); if (array_->is_remote()) { auto rest_client = storage_manager_->rest_client(); if (rest_client == nullptr) return logger_->status(Status_QueryError( "Error in query finalize; remote array with no rest client.")); return rest_client->finalize_query_to_rest(array_->array_uri(), this); } RETURN_NOT_OK(strategy_->finalize()); status_ = QueryStatus::COMPLETED; return Status::Ok(); } Status Query::get_buffer( const char* name, void** buffer, uint64_t** buffer_size) const { // Check attribute if (name != constants::coords) { if (array_schema_->attribute(name) == nullptr && array_schema_->dimension(name) == nullptr) return logger_->status(Status_QueryError( std::string("Cannot get buffer; Invalid attribute/dimension name '") + name + "'")); } if (array_schema_->var_size(name)) return logger_->status(Status_QueryError( std::string("Cannot get buffer; '") + name + "' is var-sized")); return get_data_buffer(name, buffer, buffer_size); } Status Query::get_buffer( const char* name, uint64_t** buffer_off, uint64_t** buffer_off_size, void** buffer_val, uint64_t** buffer_val_size) const { // Check attribute if (name == constants::coords) { return logger_->status( Status_QueryError("Cannot get buffer; Coordinates are not var-sized")); } if (array_schema_->attribute(name) == nullptr && array_schema_->dimension(name) == nullptr) return logger_->status(Status_QueryError( std::string("Cannot get buffer; Invalid attribute/dimension name '") + name + "'")); if (!array_schema_->var_size(name)) return logger_->status(Status_QueryError( std::string("Cannot get buffer; '") + name + "' is fixed-sized")); // Attribute or dimension auto it = buffers_.find(name); if (it != buffers_.end()) { *buffer_off = (uint64_t*)it->second.buffer_; *buffer_off_size = it->second.buffer_size_; *buffer_val = it->second.buffer_var_; *buffer_val_size = it->second.buffer_var_size_; return Status::Ok(); } // Named buffer does not exist *buffer_off = nullptr; *buffer_off_size = nullptr; *buffer_val = nullptr; *buffer_val_size = nullptr; return Status::Ok(); } Status Query::get_offsets_buffer( const char* name, uint64_t** buffer_off, uint64_t** buffer_off_size) const { // Check attribute if (name == constants::coords) { return logger_->status( Status_QueryError("Cannot get buffer; Coordinates are not var-sized")); } if (array_schema_->attribute(name) == nullptr && array_schema_->dimension(name) == nullptr) return logger_->status(Status_QueryError( std::string("Cannot get buffer; Invalid attribute/dimension name '") + name + "'")); if (!array_schema_->var_size(name)) return logger_->status(Status_QueryError( std::string("Cannot get buffer; '") + name + "' is fixed-sized")); // Attribute or dimension auto it = buffers_.find(name); if (it != buffers_.end()) { *buffer_off = (uint64_t*)it->second.buffer_; *buffer_off_size = it->second.buffer_size_; return Status::Ok(); } // Named buffer does not exist *buffer_off = nullptr; *buffer_off_size = nullptr; return Status::Ok(); } 
Status Query::get_data_buffer( const char* name, void** buffer, uint64_t** buffer_size) const { // Check attribute if (name != constants::coords) { if (array_schema_->attribute(name) == nullptr && array_schema_->dimension(name) == nullptr) return logger_->status(Status_QueryError( std::string("Cannot get buffer; Invalid attribute/dimension name '") + name + "'")); } // Special zipped coordinates if (type_ == QueryType::WRITE && name == constants::coords) { *buffer = coords_info_.coords_buffer_; *buffer_size = coords_info_.coords_buffer_size_; return Status::Ok(); } // Attribute or dimension auto it = buffers_.find(name); if (it != buffers_.end()) { if (!array_schema_->var_size(name)) { *buffer = it->second.buffer_; *buffer_size = it->second.buffer_size_; } else { *buffer = it->second.buffer_var_; *buffer_size = it->second.buffer_var_size_; } return Status::Ok(); } // Named buffer does not exist *buffer = nullptr; *buffer_size = nullptr; return Status::Ok(); } Status Query::get_validity_buffer( const char* name, uint8_t** buffer_validity_bytemap, uint64_t** buffer_validity_bytemap_size) const { // Check attribute if (!array_schema_->is_nullable(name)) return logger_->status(Status_QueryError( std::string("Cannot get buffer; '") + name + "' is non-nullable")); // Attribute or dimension auto it = buffers_.find(name); if (it != buffers_.end()) { auto vv = &it->second.validity_vector_; *buffer_validity_bytemap = vv->bytemap(); *buffer_validity_bytemap_size = vv->bytemap_size(); } return Status::Ok(); } Status Query::get_buffer_vbytemap( const char* name, uint64_t** buffer_off, uint64_t** buffer_off_size, void** buffer_val, uint64_t** buffer_val_size, uint8_t** buffer_validity_bytemap, uint64_t** buffer_validity_bytemap_size) const { const ValidityVector* vv = nullptr; RETURN_NOT_OK(get_buffer( name, buffer_off, buffer_off_size, buffer_val, buffer_val_size, &vv)); if (vv != nullptr) { *buffer_validity_bytemap = vv->bytemap(); *buffer_validity_bytemap_size = vv->bytemap_size(); } return Status::Ok(); } Status Query::get_buffer_vbytemap( const char* name, void** buffer, uint64_t** buffer_size, uint8_t** buffer_validity_bytemap, uint64_t** buffer_validity_bytemap_size) const { const ValidityVector* vv = nullptr; RETURN_NOT_OK(get_buffer(name, buffer, buffer_size, &vv)); if (vv != nullptr) { *buffer_validity_bytemap = vv->bytemap(); *buffer_validity_bytemap_size = vv->bytemap_size(); } return Status::Ok(); } Status Query::get_buffer( const char* name, void** buffer, uint64_t** buffer_size, const ValidityVector** validity_vector) const { // Check nullable attribute if (array_schema_->attribute(name) == nullptr) return logger_->status(Status_QueryError( std::string("Cannot get buffer; Invalid attribute name '") + name + "'")); if (array_schema_->var_size(name)) return logger_->status(Status_QueryError( std::string("Cannot get buffer; '") + name + "' is var-sized")); if (!array_schema_->is_nullable(name)) return logger_->status(Status_QueryError( std::string("Cannot get buffer; '") + name + "' is non-nullable")); // Attribute or dimension auto it = buffers_.find(name); if (it != buffers_.end()) { *buffer = it->second.buffer_; *buffer_size = it->second.buffer_size_; *validity_vector = &it->second.validity_vector_; return Status::Ok(); } // Named buffer does not exist *buffer = nullptr; *buffer_size = nullptr; *validity_vector = nullptr; return Status::Ok(); } Status Query::get_buffer( const char* name, uint64_t** buffer_off, uint64_t** buffer_off_size, void** buffer_val, uint64_t** buffer_val_size, 
const ValidityVector** validity_vector) const { // Check attribute if (array_schema_->attribute(name) == nullptr) return logger_->status(Status_QueryError( std::string("Cannot get buffer; Invalid attribute name '") + name + "'")); if (!array_schema_->var_size(name)) return logger_->status(Status_QueryError( std::string("Cannot get buffer; '") + name + "' is fixed-sized")); if (!array_schema_->is_nullable(name)) return logger_->status(Status_QueryError( std::string("Cannot get buffer; '") + name + "' is non-nullable")); // Attribute or dimension auto it = buffers_.find(name); if (it != buffers_.end()) { *buffer_off = (uint64_t*)it->second.buffer_; *buffer_off_size = it->second.buffer_size_; *buffer_val = it->second.buffer_var_; *buffer_val_size = it->second.buffer_var_size_; *validity_vector = &it->second.validity_vector_; return Status::Ok(); } // Named buffer does not exist *buffer_off = nullptr; *buffer_off_size = nullptr; *buffer_val = nullptr; *buffer_val_size = nullptr; *validity_vector = nullptr; return Status::Ok(); } Status Query::get_attr_serialization_state( const std::string& attribute, SerializationState::AttrState** state) { *state = &serialization_state_.attribute_states[attribute]; return Status::Ok(); } bool Query::has_results() const { if (status_ == QueryStatus::UNINITIALIZED || type_ == QueryType::WRITE) return false; for (const auto& it : buffers_) { if (*(it.second.buffer_size_) != 0) return true; } return false; } Status Query::init() { // Only if the query has not been initialized before if (status_ == QueryStatus::UNINITIALIZED) { // Check if the array got closed if (array_ == nullptr || !array_->is_open()) return logger_->status(Status_QueryError( "Cannot init query; The associated array is not open")); // Check if the array got re-opened with a different query type QueryType array_query_type; RETURN_NOT_OK(array_->get_query_type(&array_query_type)); if (array_query_type != type_) { std::stringstream errmsg; errmsg << "Cannot init query; " << "Associated array query type does not match query type: " << "(" << query_type_str(array_query_type) << " != " << query_type_str(type_) << ")"; return logger_->status(Status_QueryError(errmsg.str())); } RETURN_NOT_OK(check_buffer_names()); RETURN_NOT_OK(create_strategy()); RETURN_NOT_OK(strategy_->init()); } status_ = QueryStatus::INPROGRESS; return Status::Ok(); } URI Query::first_fragment_uri() const { if (type_ == QueryType::WRITE || fragment_metadata_.empty()) return URI(); return fragment_metadata_.front()->fragment_uri(); } URI Query::last_fragment_uri() const { if (type_ == QueryType::WRITE || fragment_metadata_.empty()) return URI(); return fragment_metadata_.back()->fragment_uri(); } Layout Query::layout() const { return layout_; } const QueryCondition* Query::condition() const { if (type_ == QueryType::WRITE) return nullptr; return &condition_; } Status Query::cancel() { status_ = QueryStatus::FAILED; return Status::Ok(); } Status Query::process() { if (status_ == QueryStatus::UNINITIALIZED) return logger_->status( Status_QueryError("Cannot process query; Query is not initialized")); status_ = QueryStatus::INPROGRESS; // Process query Status st = strategy_->dowork(); // Handle error if (!st.ok()) { status_ = QueryStatus::FAILED; return st; } if (type_ == QueryType::WRITE && layout_ == Layout::GLOBAL_ORDER) { // reset coord buffer marker at end of global write // this will allow for the user to properly set the next write batch coord_buffer_is_set_ = false; coord_data_buffer_is_set_ = false; 
coord_offsets_buffer_is_set_ = false; } // Check if the query is complete bool completed = !strategy_->incomplete(); // Handle callback and status if (completed) { if (callback_ != nullptr) callback_(callback_data_); status_ = QueryStatus::COMPLETED; } else { // Incomplete status_ = QueryStatus::INCOMPLETE; } return Status::Ok(); } Status Query::create_strategy() { if (type_ == QueryType::WRITE) { if (layout_ == Layout::COL_MAJOR || layout_ == Layout::ROW_MAJOR) { strategy_ = tdb_unique_ptr<IQueryStrategy>(tdb_new( OrderedWriter, stats_->create_child("Writer"), logger_, storage_manager_, array_, config_, buffers_, subarray_, layout_, written_fragment_info_, disable_check_global_order_, coords_info_, fragment_uri_)); } else if (layout_ == Layout::UNORDERED) { strategy_ = tdb_unique_ptr<IQueryStrategy>(tdb_new( UnorderedWriter, stats_->create_child("Writer"), logger_, storage_manager_, array_, config_, buffers_, subarray_, layout_, written_fragment_info_, disable_check_global_order_, coords_info_, fragment_uri_)); } else if (layout_ == Layout::GLOBAL_ORDER) { strategy_ = tdb_unique_ptr<IQueryStrategy>(tdb_new( GlobalOrderWriter, stats_->create_child("Writer"), logger_, storage_manager_, array_, config_, buffers_, subarray_, layout_, written_fragment_info_, disable_check_global_order_, coords_info_, fragment_uri_)); } else { assert(false); } } else { bool use_default = true; if (use_refactored_sparse_unordered_with_dups_reader() && !array_schema_->dense() && layout_ == Layout::UNORDERED && array_schema_->allows_dups()) { use_default = false; auto&& [st, non_overlapping_ranges]{Query::non_overlapping_ranges()}; RETURN_NOT_OK(st); if (*non_overlapping_ranges || !subarray_.is_set() || subarray_.range_num() == 1) { strategy_ = tdb_unique_ptr<IQueryStrategy>(tdb_new( SparseUnorderedWithDupsReader<uint8_t>, stats_->create_child("Reader"), logger_, storage_manager_, array_, config_, buffers_, subarray_, layout_, condition_)); } else { strategy_ = tdb_unique_ptr<IQueryStrategy>(tdb_new( SparseUnorderedWithDupsReader<uint64_t>, stats_->create_child("Reader"), logger_, storage_manager_, array_, config_, buffers_, subarray_, layout_, condition_)); } } else if ( use_refactored_sparse_global_order_reader() && !array_schema_->dense() && (layout_ == Layout::GLOBAL_ORDER || (layout_ == Layout::UNORDERED && subarray_.range_num() <= 1))) { // Using the reader for unordered queries to do deduplication. 
use_default = false; strategy_ = tdb_unique_ptr<IQueryStrategy>(tdb_new( SparseGlobalOrderReader, stats_->create_child("Reader"), logger_, storage_manager_, array_, config_, buffers_, subarray_, layout_, condition_)); } else if (use_refactored_dense_reader() && array_schema_->dense()) { bool all_dense = true; for (auto& frag_md : fragment_metadata_) all_dense &= frag_md->dense(); if (all_dense) { use_default = false; strategy_ = tdb_unique_ptr<IQueryStrategy>(tdb_new( DenseReader, stats_->create_child("Reader"), logger_, storage_manager_, array_, config_, buffers_, subarray_, layout_, condition_)); } } if (use_default) { strategy_ = tdb_unique_ptr<IQueryStrategy>(tdb_new( Reader, stats_->create_child("Reader"), logger_, storage_manager_, array_, config_, buffers_, subarray_, layout_, condition_)); } } if (strategy_ == nullptr) return logger_->status( Status_QueryError("Cannot create strategy; allocation failed")); return Status::Ok(); } IQueryStrategy* Query::strategy() { if (strategy_ == nullptr) { create_strategy(); } return strategy_.get(); } void Query::clear_strategy() { strategy_ = nullptr; } Status Query::disable_check_global_order() { if (status_ != QueryStatus::UNINITIALIZED) return logger_->status(Status_QueryError( "Cannot disable checking global order after initialization")); if (type_ == QueryType::READ) return logger_->status(Status_QueryError( "Cannot disable checking global order; Applicable only to writes")); disable_check_global_order_ = true; return Status::Ok(); } Status Query::check_buffer_names() { if (type_ == QueryType::WRITE) { // If the array is sparse, the coordinates must be provided if (!array_schema_->dense() && !coords_info_.has_coords_) return logger_->status(Status_WriterError( "Sparse array writes expect the coordinates of the " "cells to be written")); // If the layout is unordered, the coordinates must be provided if (layout_ == Layout::UNORDERED && !coords_info_.has_coords_) return logger_->status( Status_WriterError("Unordered writes expect the coordinates of the " "cells to be written")); // All attributes/dimensions must be provided auto expected_num = array_schema_->attribute_num(); expected_num += (coord_buffer_is_set_ || coord_data_buffer_is_set_ || coord_offsets_buffer_is_set_) ? array_schema_->dim_num() : 0; if (buffers_.size() != expected_num) return logger_->status( Status_WriterError("Writes expect all attributes (and coordinates in " "the sparse/unordered case) to be set")); } return Status::Ok(); } Status Query::check_set_fixed_buffer(const std::string& name) { if (name == constants::coords && !array_schema_->domain()->all_dims_same_type()) return logger_->status(Status_QueryError( "Cannot set buffer; Setting a buffer for zipped coordinates is not " "applicable to heterogeneous domains")); if (name == constants::coords && !array_schema_->domain()->all_dims_fixed()) return logger_->status(Status_QueryError( "Cannot set buffer; Setting a buffer for zipped coordinates is not " "applicable to domains with variable-sized dimensions")); return Status::Ok(); } Status Query::set_config(const Config& config) { config_ = config; // Refresh memory budget configuration. 
if (strategy_ != nullptr)
    RETURN_NOT_OK(strategy_->initialize_memory_budget());

  // Set subarray's config for backwards compatibility
  // Users expect the query config to affect the subarray based on existing
  // behavior before subarray was exposed directly
  subarray_.set_config(config_);

  return Status::Ok();
}

Status Query::set_coords_buffer(void* buffer, uint64_t* buffer_size) {
  // Set zipped coordinates buffer
  coords_info_.coords_buffer_ = buffer;
  coords_info_.coords_buffer_size_ = buffer_size;
  coords_info_.has_coords_ = true;

  return Status::Ok();
}

Status Query::set_buffer(
    const std::string& name,
    void* const buffer,
    uint64_t* const buffer_size,
    const bool check_null_buffers) {
  RETURN_NOT_OK(check_set_fixed_buffer(name));

  // Check buffer
  if (check_null_buffers && buffer == nullptr)
    return logger_->status(
        Status_QueryError("Cannot set buffer; " + name + " buffer is null"));

  // Check buffer size
  if (check_null_buffers && buffer_size == nullptr)
    return logger_->status(Status_QueryError(
        "Cannot set buffer; " + name + " buffer size is null"));

  // For easy reference
  const bool is_dim = array_schema_->is_dim(name);
  const bool is_attr = array_schema_->is_attr(name);

  // Check that attribute/dimension exists
  if (name != constants::coords && !is_dim && !is_attr)
    return logger_->status(Status_QueryError(
        std::string("Cannot set buffer; Invalid attribute/dimension '") +
        name + "'"));

  // Must not be nullable
  if (array_schema_->is_nullable(name))
    return logger_->status(Status_QueryError(
        std::string("Cannot set buffer; Input attribute/dimension '") + name +
        "' is nullable"));

  // Check that attribute/dimension is fixed-sized
  const bool var_size =
      (name != constants::coords && array_schema_->var_size(name));
  if (var_size)
    return logger_->status(Status_QueryError(
        std::string("Cannot set buffer; Input attribute/dimension '") + name +
        "' is var-sized"));

  // Check if zipped coordinates coexist with separate coordinate buffers
  if ((is_dim && has_zipped_coords_buffer_) ||
      (name == constants::coords && has_coords_buffer_))
    return logger_->status(Status_QueryError(
        std::string("Cannot set separate coordinate buffers and "
                    "a zipped coordinate buffer in the same query")));

  // Error if setting a new attribute/dimension after initialization
  const bool exists = buffers_.find(name) != buffers_.end();
  if (status_ != QueryStatus::UNINITIALIZED && !exists)
    return logger_->status(Status_QueryError(
        std::string("Cannot set buffer for new attribute/dimension '") +
        name + "' after initialization"));

  if (name == constants::coords) {
    has_zipped_coords_buffer_ = true;

    // Set special function for zipped coordinates buffer
    if (type_ == QueryType::WRITE)
      return set_coords_buffer(buffer, buffer_size);
  }

  if (is_dim && type_ == QueryType::WRITE) {
    // Check number of coordinates
    uint64_t coords_num = *buffer_size / array_schema_->cell_size(name);
    if (coord_buffer_is_set_ && coords_num != coords_info_.coords_num_)
      return logger_->status(Status_QueryError(
          std::string("Cannot set buffer; Input buffer for dimension '") +
          name +
          "' has a different number of coordinates than previously "
          "set coordinate buffers"));

    coords_info_.coords_num_ = coords_num;
    coord_buffer_is_set_ = true;
    coords_info_.has_coords_ = true;
  }

  has_coords_buffer_ |= is_dim;

  // Set attribute buffer
  buffers_[name].set_data_buffer(buffer, buffer_size);

  return Status::Ok();
}

Status Query::set_data_buffer(
    const std::string& name,
    void* const buffer,
    uint64_t* const buffer_size,
    const bool check_null_buffers) {
  RETURN_NOT_OK(check_set_fixed_buffer(name));

  // Check buffer
if (check_null_buffers && buffer == nullptr) if (type_ != QueryType::WRITE || *buffer_size != 0) return logger_->status( Status_QueryError("Cannot set buffer; " + name + " buffer is null")); // Check buffer size if (check_null_buffers && buffer_size == nullptr) return logger_->status(Status_QueryError( "Cannot set buffer; " + name + " buffer size is null")); // For easy reference const bool is_dim = array_schema_->is_dim(name); const bool is_attr = array_schema_->is_attr(name); // Check that attribute/dimension exists if (name != constants::coords && !is_dim && !is_attr) return logger_->status(Status_QueryError( std::string("Cannot set buffer; Invalid attribute/dimension '") + name + "'")); if (array_schema_->dense() && type_ == QueryType::WRITE && !is_attr) { return logger_->status(Status_QueryError( std::string("Dense write queries cannot set dimension buffers"))); } // Check if zipped coordinates coexist with separate coordinate buffers if ((is_dim && has_zipped_coords_buffer_) || (name == constants::coords && has_coords_buffer_)) return logger_->status(Status_QueryError( std::string("Cannot set separate coordinate buffers and " "a zipped coordinate buffer in the same query"))); // Error if setting a new attribute/dimension after initialization const bool exists = buffers_.find(name) != buffers_.end(); if (status_ != QueryStatus::UNINITIALIZED && !exists) return logger_->status(Status_QueryError( std::string("Cannot set buffer for new attribute/dimension '") + name + "' after initialization")); if (name == constants::coords) { has_zipped_coords_buffer_ = true; // Set special function for zipped coordinates buffer if (type_ == QueryType::WRITE) return set_coords_buffer(buffer, buffer_size); } if (is_dim && type_ == QueryType::WRITE) { // Check number of coordinates uint64_t coords_num = *buffer_size / array_schema_->cell_size(name); if (coord_data_buffer_is_set_ && coords_num != coords_info_.coords_num_ && name == data_buffer_name_) return logger_->status(Status_QueryError( std::string("Cannot set buffer; Input buffer for dimension '") + name + "' has a different number of coordinates than previously " "set coordinate buffers")); coords_info_.coords_num_ = coords_num; coord_data_buffer_is_set_ = true; data_buffer_name_ = name; coords_info_.has_coords_ = true; } has_coords_buffer_ |= is_dim; // Set attribute/dimension buffer on the appropriate buffer if (!array_schema_->var_size(name)) // Fixed size data buffer buffers_[name].set_data_buffer(buffer, buffer_size); else // Var sized data buffer buffers_[name].set_data_var_buffer(buffer, buffer_size); return Status::Ok(); } Status Query::set_offsets_buffer( const std::string& name, uint64_t* const buffer_offsets, uint64_t* const buffer_offsets_size, const bool check_null_buffers) { RETURN_NOT_OK(check_set_fixed_buffer(name)); // Check buffer if (check_null_buffers && buffer_offsets == nullptr) return logger_->status( Status_QueryError("Cannot set buffer; " + name + " buffer is null")); // Check buffer size if (check_null_buffers && buffer_offsets_size == nullptr) return logger_->status(Status_QueryError( "Cannot set buffer; " + name + " buffer size is null")); // For easy reference const bool is_dim = array_schema_->is_dim(name); const bool is_attr = array_schema_->is_attr(name); // Neither a dimension nor an attribute if (!is_dim && !is_attr) return logger_->status(Status_QueryError( std::string("Cannot set buffer; Invalid buffer name '") + name + "' (it should be an attribute or dimension)")); // Error if it is fixed-sized if 
(!array_schema_->var_size(name)) return logger_->status(Status_QueryError( std::string("Cannot set buffer; Input attribute/dimension '") + name + "' is fixed-sized")); // Error if setting a new attribute/dimension after initialization bool exists = buffers_.find(name) != buffers_.end(); if (status_ != QueryStatus::UNINITIALIZED && !exists) return logger_->status(Status_QueryError( std::string("Cannot set buffer for new attribute/dimension '") + name + "' after initialization")); if (is_dim && type_ == QueryType::WRITE) { // Check number of coordinates uint64_t coords_num = *buffer_offsets_size / constants::cell_var_offset_size; if (coord_offsets_buffer_is_set_ && coords_num != coords_info_.coords_num_ && name == offsets_buffer_name_) return logger_->status(Status_QueryError( std::string("Cannot set buffer; Input buffer for dimension '") + name + "' has a different number of coordinates than previously " "set coordinate buffers")); coords_info_.coords_num_ = coords_num; coord_offsets_buffer_is_set_ = true; coords_info_.has_coords_ = true; offsets_buffer_name_ = name; } has_coords_buffer_ |= is_dim; // Set attribute/dimension buffer buffers_[name].set_offsets_buffer(buffer_offsets, buffer_offsets_size); return Status::Ok(); } Status Query::set_validity_buffer( const std::string& name, uint8_t* const buffer_validity_bytemap, uint64_t* const buffer_validity_bytemap_size, const bool check_null_buffers) { RETURN_NOT_OK(check_set_fixed_buffer(name)); ValidityVector validity_vector; RETURN_NOT_OK(validity_vector.init_bytemap( buffer_validity_bytemap, buffer_validity_bytemap_size)); // Check validity buffer if (check_null_buffers && validity_vector.buffer() == nullptr) return logger_->status(Status_QueryError( "Cannot set buffer; " + name + " validity buffer is null")); // Check validity buffer size if (check_null_buffers && validity_vector.buffer_size() == nullptr) return logger_->status(Status_QueryError( "Cannot set buffer; " + name + " validity buffer size is null")); // Must be an attribute if (!array_schema_->is_attr(name)) return logger_->status(Status_QueryError( std::string("Cannot set buffer; Buffer name '") + name + "' is not an attribute")); // Must be nullable if (!array_schema_->is_nullable(name)) return logger_->status(Status_QueryError( std::string("Cannot set buffer; Input attribute '") + name + "' is not nullable")); // Error if setting a new attribute after initialization const bool exists = buffers_.find(name) != buffers_.end(); if (status_ != QueryStatus::UNINITIALIZED && !exists) return logger_->status(Status_QueryError( std::string("Cannot set buffer for new attribute '") + name + "' after initialization")); // Set attribute/dimension buffer buffers_[name].set_validity_buffer(std::move(validity_vector)); return Status::Ok(); } Status Query::set_buffer( const std::string& name, uint64_t* const buffer_off, uint64_t* const buffer_off_size, void* const buffer_val, uint64_t* const buffer_val_size, const bool check_null_buffers) { // Check buffer if (check_null_buffers && buffer_val == nullptr) if (type_ != QueryType::WRITE || *buffer_val_size != 0) return logger_->status( Status_QueryError("Cannot set buffer; " + name + " buffer is null")); // Check buffer size if (check_null_buffers && buffer_val_size == nullptr) return logger_->status(Status_QueryError( "Cannot set buffer; " + name + " buffer size is null")); // Check offset buffer if (check_null_buffers && buffer_off == nullptr) return logger_->status(Status_QueryError( "Cannot set buffer; " + name + " offset buffer is null")); 
// Check offset buffer size if (check_null_buffers && buffer_off_size == nullptr) return logger_->status(Status_QueryError( "Cannot set buffer; " + name + " offset buffer size is null")); // For easy reference const bool is_dim = array_schema_->is_dim(name); const bool is_attr = array_schema_->is_attr(name); // Check that attribute/dimension exists if (!is_dim && !is_attr) return logger_->status(Status_QueryError( std::string("Cannot set buffer; Invalid attribute/dimension '") + name + "'")); // Must not be nullable if (array_schema_->is_nullable(name)) return logger_->status(Status_QueryError( std::string("Cannot set buffer; Input attribute/dimension '") + name + "' is nullable")); // Check that attribute/dimension is var-sized if (!array_schema_->var_size(name)) return logger_->status(Status_QueryError( std::string("Cannot set buffer; Input attribute/dimension '") + name + "' is fixed-sized")); // Error if setting a new attribute/dimension after initialization const bool exists = buffers_.find(name) != buffers_.end(); if (status_ != QueryStatus::UNINITIALIZED && !exists) return logger_->status(Status_QueryError( std::string("Cannot set buffer for new attribute/dimension '") + name + "' after initialization")); if (is_dim && type_ == QueryType::WRITE) { // Check number of coordinates uint64_t coords_num = *buffer_off_size / constants::cell_var_offset_size; if (coord_buffer_is_set_ && coords_num != coords_info_.coords_num_) return logger_->status(Status_QueryError( std::string("Cannot set buffer; Input buffer for dimension '") + name + "' has a different number of coordinates than previously " "set coordinate buffers")); coords_info_.coords_num_ = coords_num; coord_buffer_is_set_ = true; coords_info_.has_coords_ = true; } // Set attribute/dimension buffer buffers_[name].set_data_var_buffer(buffer_val, buffer_val_size); buffers_[name].set_offsets_buffer(buffer_off, buffer_off_size); return Status::Ok(); } Status Query::set_buffer_vbytemap( const std::string& name, void* const buffer, uint64_t* const buffer_size, uint8_t* const buffer_validity_bytemap, uint64_t* const buffer_validity_bytemap_size, const bool check_null_buffers) { // Convert the bytemap into a ValidityVector. ValidityVector vv; RETURN_NOT_OK( vv.init_bytemap(buffer_validity_bytemap, buffer_validity_bytemap_size)); return set_buffer( name, buffer, buffer_size, std::move(vv), check_null_buffers); } Status Query::set_buffer_vbytemap( const std::string& name, uint64_t* const buffer_off, uint64_t* const buffer_off_size, void* const buffer_val, uint64_t* const buffer_val_size, uint8_t* const buffer_validity_bytemap, uint64_t* const buffer_validity_bytemap_size, const bool check_null_buffers) { // Convert the bytemap into a ValidityVector. 
ValidityVector vv; RETURN_NOT_OK( vv.init_bytemap(buffer_validity_bytemap, buffer_validity_bytemap_size)); return set_buffer( name, buffer_off, buffer_off_size, buffer_val, buffer_val_size, std::move(vv), check_null_buffers); } Status Query::set_buffer( const std::string& name, void* const buffer, uint64_t* const buffer_size, ValidityVector&& validity_vector, const bool check_null_buffers) { RETURN_NOT_OK(check_set_fixed_buffer(name)); // Check buffer if (check_null_buffers && buffer == nullptr) return logger_->status( Status_QueryError("Cannot set buffer; " + name + " buffer is null")); // Check buffer size if (check_null_buffers && buffer_size == nullptr) return logger_->status(Status_QueryError( "Cannot set buffer; " + name + " buffer size is null")); // Check validity buffer if (check_null_buffers && validity_vector.buffer() == nullptr) return logger_->status(Status_QueryError( "Cannot set buffer; " + name + " validity buffer is null")); // Check validity buffer size if (check_null_buffers && validity_vector.buffer_size() == nullptr) return logger_->status(Status_QueryError( "Cannot set buffer; " + name + " validity buffer size is null")); // Must be an attribute if (!array_schema_->is_attr(name)) return logger_->status(Status_QueryError( std::string("Cannot set buffer; Buffer name '") + name + "' is not an attribute")); // Must be fixed-size if (array_schema_->var_size(name)) return logger_->status(Status_QueryError( std::string("Cannot set buffer; Input attribute '") + name + "' is var-sized")); // Must be nullable if (!array_schema_->is_nullable(name)) return logger_->status(Status_QueryError( std::string("Cannot set buffer; Input attribute '") + name + "' is not nullable")); // Error if setting a new attribute/dimension after initialization const bool exists = buffers_.find(name) != buffers_.end(); if (status_ != QueryStatus::UNINITIALIZED && !exists) return logger_->status(Status_QueryError( std::string("Cannot set buffer for new attribute '") + name + "' after initialization")); // Set attribute buffer buffers_[name].set_data_buffer(buffer, buffer_size); buffers_[name].set_validity_buffer(std::move(validity_vector)); return Status::Ok(); } Status Query::set_buffer( const std::string& name, uint64_t* const buffer_off, uint64_t* const buffer_off_size, void* const buffer_val, uint64_t* const buffer_val_size, ValidityVector&& validity_vector, const bool check_null_buffers) { // Check buffer if (check_null_buffers && buffer_val == nullptr) if (type_ != QueryType::WRITE || *buffer_val_size != 0) return logger_->status( Status_QueryError("Cannot set buffer; " + name + " buffer is null")); // Check buffer size if (check_null_buffers && buffer_val_size == nullptr) return logger_->status(Status_QueryError( "Cannot set buffer; " + name + " buffer size is null")); // Check buffer offset if (check_null_buffers && buffer_off == nullptr) return logger_->status(Status_QueryError( "Cannot set buffer; " + name + " offset buffer is null")); // Check buffer offset size if (check_null_buffers && buffer_off_size == nullptr) return logger_->status(Status_QueryError( "Cannot set buffer; " + name + " offset buffer size is null")); // Check validity buffer if (check_null_buffers && validity_vector.buffer() == nullptr) return logger_->status(Status_QueryError( "Cannot set buffer; " + name + " validity buffer is null")); // Check validity buffer size if (check_null_buffers && validity_vector.buffer_size() == nullptr) return logger_->status(Status_QueryError( "Cannot set buffer; " + name + " 
validity buffer size is null")); // Must be an attribute if (!array_schema_->is_attr(name)) return logger_->status(Status_QueryError( std::string("Cannot set buffer; Buffer name '") + name + "' is not an attribute")); // Must be var-size if (!array_schema_->var_size(name)) return logger_->status(Status_QueryError( std::string("Cannot set buffer; Input attribute '") + name + "' is fixed-sized")); // Must be nullable if (!array_schema_->is_nullable(name)) return logger_->status(Status_QueryError( std::string("Cannot set buffer; Input attribute '") + name + "' is not nullable")); // Error if setting a new attribute after initialization const bool exists = buffers_.find(name) != buffers_.end(); if (status_ != QueryStatus::UNINITIALIZED && !exists) return logger_->status(Status_QueryError( std::string("Cannot set buffer for new attribute '") + name + "' after initialization")); // Set attribute/dimension buffer buffers_[name].set_data_var_buffer(buffer_val, buffer_val_size); buffers_[name].set_offsets_buffer(buffer_off, buffer_off_size); buffers_[name].set_validity_buffer(std::move(validity_vector)); return Status::Ok(); } Status Query::set_est_result_size( std::unordered_map<std::string, Subarray::ResultSize>& est_result_size, std::unordered_map<std::string, Subarray::MemorySize>& max_mem_size) { if (type_ == QueryType::WRITE) return logger_->status(Status_QueryError( "Cannot set estimated result size; Operation currently " "unsupported for write queries")); return subarray_.set_est_result_size(est_result_size, max_mem_size); } Status Query::set_layout_unsafe(Layout layout) { layout_ = layout; subarray_.set_layout(layout); return Status::Ok(); } Status Query::set_layout(Layout layout) { if (type_ == QueryType::READ && status_ != QueryStatus::UNINITIALIZED) return logger_->status( Status_QueryError("Cannot set layout after initialization")); if (layout == Layout::HILBERT) return logger_->status(Status_QueryError( "Cannot set layout; Hilbert order is not applicable to queries")); if (type_ == QueryType::WRITE && array_schema_->dense() && layout == Layout::UNORDERED) { return logger_->status(Status_QueryError( "Unordered writes are only possible for sparse arrays")); } layout_ = layout; subarray_.set_layout(layout); return Status::Ok(); } Status Query::set_condition(const QueryCondition& condition) { if (type_ == QueryType::WRITE) return logger_->status(Status_QueryError( "Cannot set query condition; Operation only applicable " "to read queries")); condition_ = condition; return Status::Ok(); } void Query::set_status(QueryStatus status) { status_ = status; } Status Query::set_subarray(const void* subarray) { if (!array_schema_->domain()->all_dims_same_type()) return logger_->status( Status_QueryError("Cannot set subarray; Function not applicable to " "heterogeneous domains")); if (!array_schema_->domain()->all_dims_fixed()) return logger_->status( Status_QueryError("Cannot set subarray; Function not applicable to " "domains with variable-sized dimensions")); // Prepare a subarray object Subarray sub(array_, layout_, stats_, logger_); if (subarray != nullptr) { auto dim_num = array_schema_->dim_num(); auto s_ptr = (const unsigned char*)subarray; uint64_t offset = 0; bool err_on_range_oob = true; if (type_ == QueryType::READ) { // Get read_range_oob config setting bool found = false; std::string read_range_oob_str = config()->get("sm.read_range_oob", &found); assert(found); if (read_range_oob_str != "error" && read_range_oob_str != "warn") return logger_->status(Status_QueryError( "Invalid value " 
+ read_range_oob_str + " for sm.read_range_oob. Acceptable values are 'error' or " "'warn'.")); err_on_range_oob = read_range_oob_str == "error"; } for (unsigned d = 0; d < dim_num; ++d) { auto r_size = 2 * array_schema_->dimension(d)->coord_size(); Range range(&s_ptr[offset], r_size); RETURN_NOT_OK(sub.add_range(d, std::move(range), err_on_range_oob)); offset += r_size; } } if (type_ == QueryType::WRITE) { // Not applicable to sparse arrays if (!array_schema_->dense()) return logger_->status(Status_WriterError( "Setting a subarray is not supported in sparse writes")); // Subarray must be unary for dense writes if (sub.range_num() != 1) return logger_->status( Status_WriterError("Cannot set subarray; Multi-range dense writes " "are not supported")); if (strategy_ != nullptr) strategy_->reset(); } subarray_ = sub; status_ = QueryStatus::UNINITIALIZED; return Status::Ok(); } const Subarray* Query::subarray() const { return &subarray_; } Status Query::set_subarray_unsafe(const Subarray& subarray) { subarray_ = subarray; return Status::Ok(); } Status Query::set_subarray(const tiledb::sm::Subarray& subarray) { auto query_status = status(); if (query_status != tiledb::sm::QueryStatus::UNINITIALIZED && query_status != tiledb::sm::QueryStatus::COMPLETED) { // Can be in this initialized state when the query has been de-serialized // server-side and we are trying to perform a local submit. // Don't change anything and return indication of success. return Status::Ok(); } // Set subarray if (!subarray.is_set()) // Nothing useful to set here, will leave query with its current // settings and consider successful. return Status::Ok(); auto prev_layout = subarray_.layout(); subarray_ = subarray; subarray_.set_layout(prev_layout); status_ = QueryStatus::UNINITIALIZED; return Status::Ok(); } Status Query::set_subarray_unsafe(const NDRange& subarray) { // Prepare a subarray object Subarray sub(array_, layout_, stats_, logger_); if (!subarray.empty()) { auto dim_num = array_schema_->dim_num(); for (unsigned d = 0; d < dim_num; ++d) RETURN_NOT_OK(sub.add_range_unsafe(d, subarray[d])); } assert(layout_ == sub.layout()); subarray_ = sub; status_ = QueryStatus::UNINITIALIZED; return Status::Ok(); } Status Query::check_buffers_correctness() { // Iterate through each attribute for (auto& attr : buffer_names()) { if (array_schema_->var_size(attr)) { // Check for data buffer under buffer_var and offsets buffer under buffer if (type_ == QueryType::READ) { if (buffer(attr).buffer_var_ == nullptr) { return logger_->status(Status_QueryError( std::string("Var-Sized input attribute/dimension '") + attr + "' is not set correctly. \nVar size buffer is not set.")); } } else { if (buffer(attr).buffer_var_ == nullptr && *buffer(attr).buffer_var_size_ != 0) { return logger_->status(Status_QueryError( std::string("Var-Sized input attribute/dimension '") + attr + "' is not set correctly. \nVar size buffer is not set and buffer " "size is not 0.")); } } if (buffer(attr).buffer_ == nullptr) { return logger_->status(Status_QueryError( std::string("Var-Sized input attribute/dimension '") + attr + "' is not set correctly. \nOffsets buffer is not set.")); } } else { // Fixed sized if (buffer(attr).buffer_ == nullptr) { return logger_->status(Status_QueryError( std::string("Fix-Sized input attribute/dimension '") + attr + "' is not set correctly. 
\nData buffer is not set.")); } } if (array_schema_->is_nullable(attr)) { bool exists_validity = buffer(attr).validity_vector_.buffer() != nullptr; if (!exists_validity) { return logger_->status(Status_QueryError( std::string("Nullable input attribute/dimension '") + attr + "' is not set correctly \nValidity buffer is not set")); } } } return Status::Ok(); } Status Query::submit() { // Do not resubmit completed reads. if (type_ == QueryType::READ && status_ == QueryStatus::COMPLETED) { return Status::Ok(); } // Check attribute/dimensions buffers completeness before query submits RETURN_NOT_OK(check_buffers_correctness()); if (array_->is_remote()) { auto rest_client = storage_manager_->rest_client(); if (rest_client == nullptr) return logger_->status(Status_QueryError( "Error in query submission; remote array with no rest client.")); if (status_ == QueryStatus::UNINITIALIZED) { RETURN_NOT_OK(create_strategy()); RETURN_NOT_OK(strategy_->init()); } return rest_client->submit_query_to_rest(array_->array_uri(), this); } RETURN_NOT_OK(init()); return storage_manager_->query_submit(this); } Status Query::submit_async( std::function<void(void*)> callback, void* callback_data) { // Do not resubmit completed reads. if (type_ == QueryType::READ && status_ == QueryStatus::COMPLETED) { callback(callback_data); return Status::Ok(); } RETURN_NOT_OK(init()); if (array_->is_remote()) return logger_->status( Status_QueryError("Error in async query submission; async queries not " "supported for remote arrays.")); callback_ = callback; callback_data_ = callback_data; return storage_manager_->query_submit_async(this); } QueryStatus Query::status() const { return status_; } QueryStatusDetailsReason Query::status_incomplete_reason() const { if (strategy_ != nullptr) return strategy_->status_incomplete_reason(); return QueryStatusDetailsReason::REASON_NONE; } QueryType Query::type() const { return type_; } const Config* Query::config() const { return &config_; } stats::Stats* Query::stats() const { return stats_; } tdb_shared_ptr<Buffer> Query::rest_scratch() const { return rest_scratch_; } bool Query::use_refactored_dense_reader() { bool use_refactored_readers = false; bool found = false; // First check for legacy option config_.get<bool>( "sm.use_refactored_readers", &use_refactored_readers, &found); // If the legacy/deprecated option is set use it over the new parameters // This facilitates backwards compatibility if (found) { logger_->warn( "sm.use_refactored_readers config option is deprecated.\nPlease use " "'sm.query.dense.reader' with value of 'refactored' or 'legacy'"); return use_refactored_readers; } const std::string& val = config_.get("sm.query.dense.reader", &found); assert(found); return val == "refactored"; } bool Query::use_refactored_sparse_global_order_reader() { bool use_refactored_readers = false; bool found = false; // First check for legacy option config_.get<bool>( "sm.use_refactored_readers", &use_refactored_readers, &found); // If the legacy/deprecated option is set use it over the new parameters // This facilitates backwards compatibility if (found) { logger_->warn( "sm.use_refactored_readers config option is deprecated.\nPlease use " "'sm.query.sparse_global_order.reader' with value of 'refactored' or " "'legacy'"); return use_refactored_readers; } const std::string& val = config_.get("sm.query.sparse_global_order.reader", &found); assert(found); return val == "refactored"; } bool Query::use_refactored_sparse_unordered_with_dups_reader() { bool use_refactored_readers = false; bool 
found = false; // First check for legacy option config_.get<bool>( "sm.use_refactored_readers", &use_refactored_readers, &found); // If the legacy/deprecated option is set use it over the new parameters // This facilitates backwards compatibility if (found) { logger_->warn( "sm.use_refactored_readers config option is deprecated.\nPlease use " "'sm.query.sparse_unordered_with_dups.reader' with value of " "'refactored' or 'legacy'"); return use_refactored_readers; } const std::string& val = config_.get("sm.query.sparse_unordered_with_dups.reader", &found); assert(found); return val == "refactored"; } tuple<Status, optional<bool>> Query::non_overlapping_ranges() { return subarray_.non_overlapping_ranges(storage_manager_->compute_tp()); } /* ****************************** */ /* PRIVATE METHODS */ /* ****************************** */ } // namespace sm } // namespace tiledb<|fim▁end|>
if (stride != nullptr) return logger_->status(Status_QueryError( "Cannot add range; Setting range stride is currently unsupported"));
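The set_buffer checks above assume TileDB's var-sized buffer layout, in which each variable-length cell is described by one fixed-width byte offset into a flat data buffer. A minimal Python sketch of that layout follows; all names here are illustrative, not part of the TileDB API:

import numpy as np

# Hypothetical variable-length cells to be written to one attribute.
cells = [b"red", b"green", b"blue"]

# Var-sized attributes travel as two buffers: a flat data buffer plus one
# uint64 starting offset per cell (constants::cell_var_offset_size bytes each).
data_buffer = np.frombuffer(b"".join(cells), dtype=np.uint8)
offsets_buffer = np.cumsum([0] + [len(c) for c in cells[:-1]]).astype(np.uint64)

# coords_num, as computed in Query::set_buffer for dimension buffers, is the
# offset-buffer byte size divided by the per-offset size.
coords_num = offsets_buffer.nbytes // offsets_buffer.itemsize
assert coords_num == len(cells)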
<|file_name|>ConfigureDryRunError.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # # Copyright (c) 2001 - 2016 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to<|fim▁hole|># # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "test/Configure/ConfigureDryRunError.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog" """ Verify the ConfigureDryRunError. """ import os import TestSCons _obj = TestSCons._obj test = TestSCons.TestSCons() lib = test.Configure_lib NCR = test.NCR # non-cached rebuild CR = test.CR # cached rebuild (up to date) NCF = test.NCF # non-cached build failure CF = test.CF # cached build failure SConstruct_path = test.workpath('SConstruct') test.write(SConstruct_path, """ env = Environment() import os env.AppendENVPath('PATH', os.environ['PATH']) conf = Configure(env) r1 = conf.CheckLib('%s') # will pass r2 = conf.CheckLib('hopefullynolib') # will fail env = conf.Finish() if not (r1 and not r2): Exit(1) """ % (lib)) expect = """ scons: *** Cannot create configure directory ".sconf_temp" within a dry-run. """ + test.python_file_line(SConstruct_path, 5) test.run(arguments='-n', status=2, stderr=expect) test.must_not_exist('config.log') test.subdir('.sconf_temp') conftest_0_c = os.path.join(".sconf_temp", "conftest_0.c") SConstruct_file_line = test.python_file_line(SConstruct_path, 6)[:-1] expect = """ scons: *** Cannot update configure test "%(conftest_0_c)s" within a dry-run. %(SConstruct_file_line)s """ % locals() test.run(arguments='-n', status=2, stderr=expect) test.run() test.checkLogAndStdout( ["Checking for C library %s... " % lib, "Checking for C library hopefullynolib... "], ["yes", "no"], [[((".c", NCR), (_obj, NCR))], [((".c", NCR), (_obj, NCF))]], "config.log", ".sconf_temp", "SConstruct") oldLog = test.read(test.workpath('config.log')) test.run(arguments='-n') test.checkLogAndStdout( ["Checking for C library %s... " % lib, "Checking for C library hopefullynolib... "], ["yes", "no"], [[((".c", CR), (_obj, CR))], [((".c", CR), (_obj, CF))]], "config.log", ".sconf_temp", "SConstruct", doCheckLog=0) newLog = test.read(test.workpath('config.log')) if newLog != oldLog: print "Unexpected update of log file within a dry run" test.fail_test() test.pass_test() # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:<|fim▁end|>
# permit persons to whom the Software is furnished to do so, subject to # the following conditions:
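A SConstruct can avoid tripping this dry-run error by skipping configuration when -n is passed. A rough sketch, assuming GetOption('no_exec') reports dry-run mode in the SCons release at hand:

env = Environment()
# Configure checks build and run small test programs, which cannot happen
# during a dry run, so guard them explicitly.
if not GetOption('no_exec'):
    conf = Configure(env)
    have_m = conf.CheckLib('m')  # illustrative library check
    env = conf.Finish()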
<|file_name|>listener.go<|end_file_name|><|fim▁begin|>package elastic_load_balancing import ( "github.com/jagregory/cfval/constraints" . "github.com/jagregory/cfval/schema" ) // see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html var listener = NestedResource{ Description: "ElasticLoadBalancing Listener", Properties: Properties{ "InstancePort": Schema{ Type: ValueString, Required: constraints.Always, }, "InstanceProtocol": Schema{ Type: instanceProtocol, // TODO: // * If the front-end protocol is HTTP or HTTPS, InstanceProtocol has to // be at the same protocol layer, i.e., HTTP or HTTPS. Likewise, if the // front-end protocol is TCP or SSL, InstanceProtocol has to be TCP // or SSL.<|fim▁hole|> // InstanceProtocol has to be secure, i.e., HTTPS or SSL. If there is // another listener with the same InstancePort whose InstanceProtocol is // HTTP or TCP, the listener's InstanceProtocol must be either HTTP // or TCP. }, "LoadBalancerPort": Schema{ Type: ValueString, Required: constraints.Always, }, "PolicyNames": Schema{ Type: Multiple(ValueString), }, "Protocol": Schema{ Required: constraints.Always, Type: instanceProtocol, }, "SSLCertificateId": Schema{ Type: ValueString, }, }, }<|fim▁end|>
// * If there is another listener with the same InstancePort whose // InstanceProtocol is secure, i.e., HTTPS or SSL, the listener's
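A listener block satisfying this schema might look as follows, sketched as a Python dict mirroring the CloudFormation JSON; the certificate ARN is hypothetical:

listener = {
    "LoadBalancerPort": "443",   # required
    "Protocol": "HTTPS",         # required
    "InstancePort": "8080",      # required
    "InstanceProtocol": "HTTP",  # same protocol layer as the HTTPS front end
    "SSLCertificateId": "arn:aws:iam::123456789012:server-certificate/my-cert",
    "PolicyNames": [],
}
# The schema above marks these three properties as always required.
assert all(k in listener for k in ("InstancePort", "LoadBalancerPort", "Protocol"))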
<|file_name|>ckan_search.py<|end_file_name|><|fim▁begin|>import os import time import mechanize CKAN = os.environ.get('CKAN', 'http://data.england.nhs.uk/')<|fim▁hole|> class Transaction(object): def __init__(self): self.custom_timers = {} def run(self): # create a Browser instance br = mechanize.Browser() # don't bother with robots.txt br.set_handle_robots(False) # add a custom header so CKAN allows our requests br.addheaders = [('User-agent', 'Mozilla/5.0 Compatible')] # start the timer start_timer = time.time() # submit the request br.open(CKAN) # stop the timer latency = time.time() - start_timer # store the custom timer self.custom_timers['Load_Front_Page'] = latency # think-time time.sleep(2) # select first (zero-based) form on page br.select_form(nr=0) # set form field br.form['q'] = 'england' start_timer = time.time() br.submit() assert 'datasets found for' in br.response().read(), 'Search not performed' # verify responses are valid assert (br.response().code == 200), 'Bad HTTP Response' latency = time.time() - start_timer # store the custom timer self.custom_timers['Search'] = latency # think-time time.sleep(2) if __name__ == '__main__': trans = Transaction() trans.run() for timer in trans.custom_timers: print '%s: %.5f secs' % (timer, trans.custom_timers[timer])<|fim▁end|>
<|file_name|>types.js<|end_file_name|><|fim▁begin|><|fim▁hole|>export const LOGGED_OUT = 'LOGGED_OUT';<|fim▁end|>
export const LOGGED_IN = 'LOGGED_IN';
<|file_name|>distributions.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- ## # distributions.py: module for probability distributions. ## # © 2017, Chris Ferrie ([email protected]) and # Christopher Granade ([email protected]). # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. ## ## IMPORTS ################################################################### from __future__ import division from __future__ import absolute_import from builtins import range from future.utils import with_metaclass import numpy as np import scipy.stats as st import scipy.linalg as la from scipy.interpolate import interp1d from scipy.integrate import cumtrapz from scipy.spatial import ConvexHull, Delaunay from functools import partial import abc from qinfer import utils as u from qinfer.metrics import rescaled_distance_mtx from qinfer.clustering import particle_clusters from qinfer._exceptions import ApproximationWarning import warnings ## EXPORTS ################################################################### __all__ = [ 'Distribution', 'SingleSampleMixin', 'MixtureDistribution', 'ParticleDistribution', 'ProductDistribution', 'UniformDistribution', 'DiscreteUniformDistribution', 'MVUniformDistribution', 'ConstantDistribution', 'NormalDistribution', 'MultivariateNormalDistribution', 'SlantedNormalDistribution', 'LogNormalDistribution', 'BetaDistribution', 'DirichletDistribution', 'BetaBinomialDistribution', 'GammaDistribution', 'GinibreUniform', 'HaarUniform', 'HilbertSchmidtUniform', 'PostselectedDistribution', 'ConstrainedSumDistribution', 'InterpolatedUnivariateDistribution' ] ## FUNCTIONS ################################################################# def scipy_dist(name, *args, **kwargs): """ Wraps calling a scipy.stats distribution to allow for pickling. See https://github.com/scipy/scipy/issues/3125. """ return getattr(st, name)(*args, **kwargs) ## ABSTRACT CLASSES AND MIXINS ############################################### class Distribution(with_metaclass(abc.ABCMeta, object)): """ Abstract base class for probability distributions on one or more random variables. 
""" @abc.abstractproperty def n_rvs(self): """ The number of random variables that this distribution is over. :type: `int` """ pass @abc.abstractmethod def sample(self, n=1): """ Returns one or more samples from this probability distribution. :param int n: Number of samples to return. :rtype: numpy.ndarray :return: An array containing samples from the distribution of shape ``(n, d)``, where ``d`` is the number of random variables. """ pass class SingleSampleMixin(with_metaclass(abc.ABCMeta, object)): """ Mixin class that extends a class so as to generate multiple samples correctly, given a method ``_sample`` that generates one sample at a time. """ @abc.abstractmethod def _sample(self): pass def sample(self, n=1): samples = np.zeros((n, self.n_rvs)) for idx in range(n): samples[idx, :] = self._sample() return samples ## CLASSES ################################################################### class MixtureDistribution(Distribution): r""" Samples from a weighted list of distributions. :param weights: Length ``n_dist`` list or ``np.ndarray`` of probabilites summing to 1. :param dist: Either a length ``n_dist`` list of ``Distribution`` instances, or a ``Distribution`` class, for example, ``NormalDistribution``. It is assumed that a list of ``Distribution``s all have the same ``n_rvs``. :param dist_args: If ``dist`` is a class, an array of shape ``(n_dist, n_rvs)`` where ``dist_args[k,:]`` defines the arguments of the k'th distribution. Use ``None`` if the distribution has no arguments. :param dist_kw_args: If ``dist`` is a class, a dictionary where each key's value is an array of shape ``(n_dist, n_rvs)`` where ``dist_kw_args[key][k,:]`` defines the keyword argument corresponding to ``key`` of the k'th distribution. Use ``None`` if the distribution needs no keyword arguments. :param bool shuffle: Whether or not to shuffle result after sampling. Not shuffling will result in variates being in the same order as the distributions. Default is ``True``. """ def __init__(self, weights, dist, dist_args=None, dist_kw_args=None, shuffle=True): super(MixtureDistribution, self).__init__() self._weights = weights self._n_dist = len(weights) self._shuffle = shuffle try: self._example_dist = dist[0] self._is_dist_list = True self._dist_list = dist assert(self._n_dist == len(self._dist_list)) except: self._is_dist_list = False self._dist = dist self._dist_args = dist_args self._dist_kw_args = dist_kw_args assert(self._n_dist == self._dist_args.shape[0]) self._example_dist = self._dist( *self._dist_arg(0), **self._dist_kw_arg(0) ) def _dist_arg(self, k): """ Returns the arguments for the k'th distribution. :param int k: Index of distribution in question. :rtype: ``np.ndarary`` """ if self._dist_args is not None: return self._dist_args[k,:] else: return [] def _dist_kw_arg(self, k): """ Returns a dictionary of keyword arguments for the k'th distribution. :param int k: Index of the distribution in question. :rtype: ``dict`` """ if self._dist_kw_args is not None: return { key:self._dist_kw_args[key][k,:] for key in self._dist_kw_args.keys() } else: return {} @property def n_rvs(self): return self._example_dist.n_rvs @property def n_dist(self): """ The number of distributions in the mixture distribution. 
""" return self._n_dist def sample(self, n=1): # how many samples to take from each dist ns = np.random.multinomial(n, self._weights) idxs = np.arange(self.n_dist)[ns > 0] if self._is_dist_list: # sample from each distribution samples = np.concatenate([ self._dist_list[k].sample(n=ns[k]) for k in idxs ]) else: # instantiate each distribution and then sample samples = np.concatenate([ self._dist( *self._dist_arg(k), **self._dist_kw_arg(k) ).sample(n=ns[k]) for k in idxs ]) # in-place shuffling if self._shuffle: np.random.shuffle(samples) return samples class ParticleDistribution(Distribution): r""" A distribution consisting of a list of weighted vectors. Note that either `n_mps` or both (`particle_locations`, `particle_weights`) must be specified, or an error will be raised. :param numpy.ndarray particle_weights: Length ``n_particles`` list of particle weights. :param particle_locations: Shape ``(n_particles, n_mps)`` array of particle locations. :param int n_mps: Dimension of parameter space. This parameter should only be set when `particle_weights` and `particle_locations` are not set (and vice versa). """ def __init__(self, n_mps=None, particle_locations=None, particle_weights=None): super(ParticleDistribution, self).__init__() if particle_locations is None or particle_weights is None: # Initialize with single particle at origin. self.particle_locations = np.zeros((1, n_mps)) self.particle_weights = np.ones((1,)) elif n_mps is None: self.particle_locations = particle_locations self.particle_weights = np.abs(particle_weights) self.particle_weights = self.particle_weights / np.sum(self.particle_weights) else: raise ValueError('Either the dimension of parameter space, `n_mps`, or the particles, `particle_locations` and `particle_weights` must be specified.') @property def n_particles(self): """ Returns the number of particles in the distribution :type: `int` """ return self.particle_locations.shape[0] @property def n_ess(self): """ Returns the effective sample size (ESS) of the current particle distribution. :type: `float` :return: The effective sample size, given by :math:`1/\sum_i w_i^2`. """ return 1 / (np.sum(self.particle_weights**2)) ## DISTRIBUTION CONTRACT ## @property def n_rvs(self): """ Returns the dimension of each particle. :type: `int` """ return self.particle_locations.shape[1] def sample(self, n=1): """ Returns random samples from the current particle distribution according to particle weights. :param int n: The number of samples to draw. :return: The sampled model parameter vectors. :rtype: `~numpy.ndarray` of shape ``(n, updater.n_rvs)``. """ cumsum_weights = np.cumsum(self.particle_weights) return self.particle_locations[np.minimum(cumsum_weights.searchsorted( np.random.random((n,)), side='right' ), len(cumsum_weights) - 1)] ## MOMENT FUNCTIONS ## @staticmethod def particle_mean(weights, locations): r""" Returns the arithmetic mean of the `locations` weighted by `weights` :param numpy.ndarray weights: Weights of each particle in array of shape ``(n_particles,)``.<|fim▁hole|> :returns: An array containing the mean """ return np.dot(weights, locations) @classmethod def particle_covariance_mtx(cls, weights, locations): """ Returns an estimate of the covariance of a distribution represented by a given set of SMC particle. :param weights: An array of shape ``(n_particles,)`` containing the weights of each particle. :param location: An array of shape ``(n_particles, n_modelparams)`` containing the locations of each particle. 
:rtype: :class:`numpy.ndarray`, shape ``(n_modelparams, n_modelparams)``. :returns: An array containing the estimated covariance matrix. """ # Find the mean model vector, shape (n_modelparams, ). mu = cls.particle_mean(weights, locations) # Transpose the particle locations to have shape # (n_modelparams, n_particles). xs = locations.transpose([1, 0]) # Give a shorter name to the particle weights, shape (n_particles, ). ws = weights cov = ( # This sum is a reduction over the particle index, chosen to be # axis=2. Thus, the sum represents an expectation value over the # outer product $x . x^T$. # # All three factors have the particle index as the rightmost # index, axis=2. Using the Einstein summation convention (ESC), # we can reduce over the particle index easily while leaving # the model parameter index to vary between the two factors # of xs. # # This corresponds to evaluating A_{m,n} = w_{i} x_{m,i} x_{n,i} # using the ESC, where A_{m,n} is the temporary array created. np.einsum('i,mi,ni', ws, xs, xs) # We finish by subtracting from the above expectation value # the outer product $mu . mu^T$. - np.dot(mu[..., np.newaxis], mu[np.newaxis, ...]) ) # The SMC approximation is not guaranteed to produce a # positive-semidefinite covariance matrix. If a negative eigenvalue # is produced, we should warn the caller of this. assert np.all(np.isfinite(cov)) if not np.all(la.eig(cov)[0] >= 0): warnings.warn('Numerical error in covariance estimation causing positive semidefinite violation.', ApproximationWarning) return cov def est_mean(self): """ Returns the mean value of the current particle distribution. :rtype: :class:`numpy.ndarray`, shape ``(n_mps,)``. :returns: An array containing an estimate of the mean model vector. """ return self.particle_mean(self.particle_weights, self.particle_locations) def est_meanfn(self, fn): """ Returns the expectation value of a given function :math:`f` over the current particle distribution. Here, :math:`f` is represented by a function ``fn`` that is vectorized over particles, such that ``f(modelparams)`` has shape ``(n_particles, k)``, where ``n_particles = modelparams.shape[0]``, and where ``k`` is a positive integer. :param callable fn: Function implementing :math:`f` in a vectorized manner. (See above.) :rtype: :class:`numpy.ndarray`, shape ``(k, )``. :returns: An array containing an estimate of the mean of :math:`f`. """ return np.einsum('i...,i...', self.particle_weights, fn(self.particle_locations) ) def est_covariance_mtx(self, corr=False): """ Returns the full-rank covariance matrix of the current particle distribution. :param bool corr: If `True`, the covariance matrix is normalized by the outer product of the square root diagonal of the covariance matrix, i.e. the correlation matrix is returned instead. :rtype: :class:`numpy.ndarray`, shape ``(n_modelparams, n_modelparams)``. :returns: An array containing the estimated covariance matrix. """ cov = self.particle_covariance_mtx(self.particle_weights, self.particle_locations) if corr: dstd = np.sqrt(np.diag(cov)) cov /= (np.outer(dstd, dstd)) return cov ## INFORMATION QUANTITIES ## def est_entropy(self): r""" Estimates the entropy of the current particle distribution as :math:`-\sum_i w_i \log w_i` where :math:`\{w_i\}` is the set of particles with nonzero weight. 
""" nz_weights = self.particle_weights[self.particle_weights > 0] return -np.sum(np.log(nz_weights) * nz_weights) def _kl_divergence(self, other_locs, other_weights, kernel=None, delta=1e-2): """ Finds the KL divergence between this and another particle distribution by using a kernel density estimator to smooth over the other distribution's particles. """ if kernel is None: kernel = st.norm(loc=0, scale=1).pdf dist = rescaled_distance_mtx(self, other_locs) / delta K = kernel(dist) return -self.est_entropy() - (1 / delta) * np.sum( self.particle_weights * np.log( np.sum( other_weights * K, axis=1 # Sum over the particles of ``other``. ) ), axis=0 # Sum over the particles of ``self``. ) def est_kl_divergence(self, other, kernel=None, delta=1e-2): """ Finds the KL divergence between this and another particle distribution by using a kernel density estimator to smooth over the other distribution's particles. :param SMCUpdater other: """ return self._kl_divergence( other.particle_locations, other.particle_weights, kernel, delta ) ## CLUSTER ESTIMATION METHODS ############################################# def est_cluster_moments(self, cluster_opts=None): # TODO: document if cluster_opts is None: cluster_opts = {} for cluster_label, cluster_particles in particle_clusters( self.particle_locations, self.particle_weights, **cluster_opts ): w = self.particle_weights[cluster_particles] l = self.particle_locations[cluster_particles] yield ( cluster_label, sum(w), # The zeroth moment is very useful here! self.particle_mean(w, l), self.particle_covariance_mtx(w, l) ) def est_cluster_covs(self, cluster_opts=None): # TODO: document cluster_moments = np.array( list(self.est_cluster_moments(cluster_opts=cluster_opts)), dtype=[ ('label', 'int'), ('weight', 'float64'), ('mean', '{}float64'.format(self.n_rvs)), ('cov', '{0},{0}float64'.format(self.n_rvs)), ]) ws = cluster_moments['weight'][:, np.newaxis, np.newaxis] within_cluster_var = np.sum(ws * cluster_moments['cov'], axis=0) between_cluster_var = self.particle_covariance_mtx( # Treat the cluster means as a new very small particle cloud. cluster_moments['weight'], cluster_moments['mean'] ) total_var = within_cluster_var + between_cluster_var return within_cluster_var, between_cluster_var, total_var def est_cluster_metric(self, cluster_opts=None): """ Returns an estimate of how much of the variance in the current posterior can be explained by a separation between *clusters*. """ wcv, bcv, tv = self.est_cluster_covs(cluster_opts) return np.diag(bcv) / np.diag(tv) ## REGION ESTIMATION METHODS ############################################## def est_credible_region(self, level=0.95, return_outside=False, modelparam_slice=None): """ Returns an array containing particles inside a credible region of a given level, such that the described region has probability mass no less than the desired level. Particles in the returned region are selected by including the highest- weight particles first until the desired credibility level is reached. :param float level: Crediblity level to report. :param bool return_outside: If `True`, the return value is a tuple of the those particles within the credible region, and the rest of the posterior particle cloud. :param slice modelparam_slice: Slice over which model parameters to consider. :rtype: :class:`numpy.ndarray`, shape ``(n_credible, n_mps)``, where ``n_credible`` is the number of particles in the credible region and ``n_mps`` corresponds to the size of ``modelparam_slice``. 
If ``return_outside`` is ``True``, this method instead returns the tuple ``(inside, outside)`` where ``inside`` is as described above, and ``outside`` has shape ``(n_particles-n_credible, n_mps)``. :return: An array of particles inside the estimated credible region. Or, if ``return_outside`` is ``True``, both the particles inside and the particles outside, as a tuple. """ # which slice of modelparams to take s_ = np.s_[modelparam_slice] if modelparam_slice is not None else np.s_[:] mps = self.particle_locations[:, s_] # Start by sorting the particles by weight. # We do so by obtaining an array of indices `id_sort` such that # `particle_weights[id_sort]` is in descending order. id_sort = np.argsort(self.particle_weights)[::-1] # Find the cumulative sum of the sorted weights. cumsum_weights = np.cumsum(self.particle_weights[id_sort]) # Find all the indices where the sum is less than level. # We first find id_cred such that # `all(cumsum_weights[id_cred] <= level)`. id_cred = cumsum_weights <= level # By construction, by adding the next particle to id_cred, it must be # true that `cumsum_weights[id_cred] >= level`, as required. id_cred[np.sum(id_cred)] = True # We now return a slice onto the particle_locations by first permuting # the particles according to the sort order, then by selecting the # credible particles. if return_outside: return ( mps[id_sort][id_cred], mps[id_sort][np.logical_not(id_cred)] ) else: return mps[id_sort][id_cred] def region_est_hull(self, level=0.95, modelparam_slice=None): """ Estimates a credible region over models by taking the convex hull of a credible subset of particles. :param float level: The desired credibility level (see :meth:`SMCUpdater.est_credible_region`). :param slice modelparam_slice: Slice over which model parameters to consider. :return: The tuple ``(faces, vertices)`` where ``faces`` describes all the vertices of all of the faces on the exterior of the convex hull, and ``vertices`` is a list of all vertices on the exterior of the convex hull. :rtype: ``faces`` is a ``numpy.ndarray`` with shape ``(n_face, n_mps, n_mps)`` and indices ``(idx_face, idx_vertex, idx_mps)`` where ``n_mps`` corresponds to the size of ``modelparam_slice``. ``vertices`` is a ``numpy.ndarray`` of shape ``(n_vertices, n_mps)``. """ points = self.est_credible_region( level=level, modelparam_slice=modelparam_slice ) hull = ConvexHull(points) return points[hull.simplices], points[u.uniquify(hull.vertices.flatten())] def region_est_ellipsoid(self, level=0.95, tol=0.0001, modelparam_slice=None): r""" Estimates a credible region over models by finding the minimum volume enclosing ellipse (MVEE) of a credible subset of particles. :param float level: The desired credibility level (see :meth:`SMCUpdater.est_credible_region`). :param float tol: The allowed error tolerance in the MVEE optimization (see :meth:`~qinfer.utils.mvee`). :param slice modelparam_slice: Slice over which model parameters to consider. :return: A tuple ``(A, c)`` where ``A`` is the covariance matrix of the ellipsoid and ``c`` is the center. A point :math:`\vec{x}` is in the ellipsoid whenever :math:`(\vec{x}-\vec{c})^{T}A^{-1}(\vec{x}-\vec{c})\leq 1`. :rtype: ``A`` is ``np.ndarray`` of shape ``(n_mps,n_mps)`` and ``centroid`` is ``np.ndarray`` of shape ``(n_mps)``. ``n_mps`` corresponds to the size of ``param_slice``. 
""" _, vertices = self.region_est_hull(level=level, modelparam_slice=modelparam_slice) A, centroid = u.mvee(vertices, tol) return A, centroid def in_credible_region(self, points, level=0.95, modelparam_slice=None, method='hpd-hull', tol=0.0001): """ Decides whether each of the points lie within a credible region of the current distribution. If ``tol`` is ``None``, the particles are tested directly against the convex hull object. If ``tol`` is a positive ``float``, particles are tested to be in the interior of the smallest enclosing ellipsoid of this convex hull, see :meth:`SMCUpdater.region_est_ellipsoid`. :param np.ndarray points: An ``np.ndarray`` of shape ``(n_mps)`` for a single point, or of shape ``(n_points, n_mps)`` for multiple points, where ``n_mps`` corresponds to the same dimensionality as ``param_slice``. :param float level: The desired crediblity level (see :meth:`SMCUpdater.est_credible_region`). :param str method: A string specifying which credible region estimator to use. One of ``'pce'``, ``'hpd-hull'`` or ``'hpd-mvee'`` (see below). :param float tol: The allowed error tolerance for those methods which require a tolerance (see :meth:`~qinfer.utils.mvee`). :param slice modelparam_slice: A slice describing which model parameters to consider in the credible region, effectively marginizing out the remaining parameters. By default, all model parameters are included. :return: A boolean array of shape ``(n_points, )`` specifying whether each of the points lies inside the confidence region. Methods ~~~~~~~ The following values are valid for the ``method`` argument. - ``'pce'``: Posterior Covariance Ellipsoid. Computes the covariance matrix of the particle distribution marginalized over the excluded slices and uses the :math:`\chi^2` distribution to determine how to rescale it such the the corresponding ellipsoid has the correct size. The ellipsoid is translated by the mean of the particle distribution. It is determined which of the ``points`` are on the interior. - ``'hpd-hull'``: High Posterior Density Convex Hull. See :meth:`SMCUpdater.region_est_hull`. Computes the HPD region resulting from the particle approximation, computes the convex hull of this, and it is determined which of the ``points`` are on the interior. - ``'hpd-mvee'``: High Posterior Density Minimum Volume Enclosing Ellipsoid. See :meth:`SMCUpdater.region_est_ellipsoid` and :meth:`~qinfer.utils.mvee`. Computes the HPD region resulting from the particle approximation, computes the convex hull of this, and determines the minimum enclosing ellipsoid. Deterimines which of the ``points`` are on the interior. """ if method == 'pce': s_ = np.s_[modelparam_slice] if modelparam_slice is not None else np.s_[:] A = self.est_covariance_mtx()[s_, s_] c = self.est_mean()[s_] # chi-squared distribution gives correct level curve conversion mult = st.chi2.ppf(level, c.size) results = u.in_ellipsoid(points, mult * A, c) elif method == 'hpd-mvee': tol = 0.0001 if tol is None else tol A, c = self.region_est_ellipsoid(level=level, tol=tol, modelparam_slice=modelparam_slice) results = u.in_ellipsoid(points, np.linalg.inv(A), c) elif method == 'hpd-hull': # it would be more natural to call region_est_hull, # but that function uses ConvexHull which has no # easy way of determining if a point is interior. # Here, Delaunay gives us access to all of the # necessary simplices. 
# this fills the convex hull with (n_mps+1)-dimensional # simplices; the convex hull is an almost-everywhere # disjoint union of these simplices hull = Delaunay(self.est_credible_region(level=level, modelparam_slice=modelparam_slice)) # now we just check whether each of the given points is in # any of the simplices. (http://stackoverflow.com/a/16898636/1082565) results = hull.find_simplex(points) >= 0 return results class ProductDistribution(Distribution): r""" Takes a non-zero number of QInfer distributions :math:`D_k` as input and returns their Cartesian product. In other words, the returned distribution is :math:`\Pr(D_1, \dots, D_N) = \prod_k \Pr(D_k)`. :param Distribution factors: Distribution objects representing :math:`D_k`. Alternatively, one iterable argument can be given, in which case the factors are the values drawn from that iterator. """ def __init__(self, *factors): if len(factors) == 1: try: self._factors = list(factors[0]) except: self._factors = factors else: self._factors = factors @property def n_rvs(self): return sum([f.n_rvs for f in self._factors]) def sample(self, n=1): return np.hstack([f.sample(n) for f in self._factors]) _DEFAULT_RANGES = np.array([[0, 1]]) _DEFAULT_RANGES.flags.writeable = False # Prevent anyone from modifying the # default ranges. ## CLASSES ################################################################### class UniformDistribution(Distribution): """ Uniform distribution on a given rectangular region. :param numpy.ndarray ranges: Array of shape ``(n_rvs, 2)``, where ``n_rvs`` is the number of random variables, specifying the upper and lower limits for each variable. """ def __init__(self, ranges=_DEFAULT_RANGES): if not isinstance(ranges, np.ndarray): ranges = np.array(ranges) if len(ranges.shape) == 1: ranges = ranges[np.newaxis, ...] self._ranges = ranges self._n_rvs = ranges.shape[0] self._delta = ranges[:, 1] - ranges[:, 0] @property def n_rvs(self): return self._n_rvs def sample(self, n=1): shape = (n, self._n_rvs)# if n == 1 else (self._n_rvs, n) z = np.random.random(shape) return self._ranges[:, 0] + z * self._delta def grad_log_pdf(self, var): # THIS IS NOT TECHNICALLY LEGIT; BCRB doesn't technically work with a # prior that doesn't go to 0 at its end points. But we do it anyway. if var.shape[0] == 1: return 12/(self._delta)**2 else: return np.zeros(var.shape) class ConstantDistribution(Distribution): """ Represents a deterministic variable; useful for combining with other distributions, marginalizing, etc. :param values: Shape ``(n,)`` array or list of values :math:`X_0` such that :math:`\Pr(X) = \delta(X - X_0)`. """ def __init__(self, values): self._values = np.array(values)[np.newaxis, :] @property def n_rvs(self): return self._values.shape[1] def sample(self, n=1): return np.repeat(self._values, n, axis=0) class NormalDistribution(Distribution): """ Normal or truncated normal distribution over a single random variable. :param float mean: Mean of the represented random variable. :param float var: Variance of the represented random variable. :param tuple trunc: Limits at which the PDF of this distribution should be truncated, or ``None`` if the distribution is to have infinite support. 
""" def __init__(self, mean, var, trunc=None): self.mean = mean self.var = var if trunc is not None: low, high = trunc sigma = np.sqrt(var) a = (low - mean) / sigma b = (high - mean) / sigma self.dist = partial(scipy_dist, 'truncnorm', a, b, loc=mean, scale=np.sqrt(var)) else: self.dist = partial(scipy_dist, 'norm', mean, np.sqrt(var)) @property def n_rvs(self): return 1 def sample(self, n=1): return self.dist().rvs(size=n)[:, np.newaxis] def grad_log_pdf(self, x): return -(x - self.mean) / self.var class MultivariateNormalDistribution(Distribution): """ Multivariate (vector-valued) normal distribution. :param np.ndarray mean: Array of shape ``(n_rvs, )`` representing the mean of the distribution. :param np.ndarray cov: Array of shape ``(n_rvs, n_rvs)`` representing the covariance matrix of the distribution. """ def __init__(self, mean, cov): # Flatten the mean first, so we have a strong guarantee about its # shape. self.mean = np.array(mean).flatten() self.cov = cov self.invcov = la.inv(cov) @property def n_rvs(self): return self.mean.shape[0] def sample(self, n=1): return np.einsum("ij,nj->ni", la.sqrtm(self.cov), np.random.randn(n, self.n_rvs)) + self.mean def grad_log_pdf(self, x): return -np.dot(self.invcov, (x - self.mean).transpose()).transpose() class SlantedNormalDistribution(Distribution): r""" Uniform distribution on a given rectangular region with additive noise. Random variates from this distribution follow :math:`X+Y` where :math:`X` is drawn uniformly with respect to the rectangular region defined by ranges, and :math:`Y` is normally distributed about 0 with variance ``weight**2``. :param numpy.ndarray ranges: Array of shape ``(n_rvs, 2)``, where ``n_rvs`` is the number of random variables, specifying the upper and lower limits for each variable. :param float weight: Number specifying the inverse variance of the additive noise term. """ def __init__(self, ranges=_DEFAULT_RANGES, weight=0.01): if not isinstance(ranges, np.ndarray): ranges = np.array(ranges) if len(ranges.shape) == 1: ranges = ranges[np.newaxis, ...] self._ranges = ranges self._n_rvs = ranges.shape[0] self._delta = ranges[:, 1] - ranges[:, 0] self._weight = weight @property def n_rvs(self): return self._n_rvs def sample(self, n=1): shape = (n, self._n_rvs)# if n == 1 else (self._n_rvs, n) z = np.random.randn(n, self._n_rvs) return self._ranges[:, 0] + \ self._weight*z + \ np.random.rand(n, self._n_rvs)*self._delta[np.newaxis,:] class LogNormalDistribution(Distribution): """ Log-normal distribution. :param mu: Location parameter (numeric), set to 0 by default. :param sigma: Scale parameter (numeric), set to 1 by default. Must be strictly greater than zero. """ def __init__(self, mu=0, sigma=1): self.mu = mu # lognormal location parameter self.sigma = sigma # lognormal scale parameter self.dist = partial(scipy_dist, 'lognorm', 1, mu, sigma) # scipy distribution location = 0 @property def n_rvs(self): return 1 def sample(self, n=1): return self.dist().rvs(size=n)[:, np.newaxis] class BetaDistribution(Distribution): r""" The beta distribution, whose pdf at :math:`x` is proportional to :math:`x^{\alpha-1}(1-x)^{\beta-1}`. Note that either ``alpha`` and ``beta``, or ``mean`` and ``var``, must be specified as inputs; either case uniquely determines the distribution. :param float alpha: The alpha shape parameter of the beta distribution. :param float beta: The beta shape parameter of the beta distribution. :param float mean: The desired mean value of the beta distribution. 
:param float var: The desired variance of the beta distribution. """ def __init__(self, alpha=None, beta=None, mean=None, var=None): if alpha is not None and beta is not None: self.alpha = alpha self.beta = beta self.mean = alpha / (alpha + beta) self.var = alpha * beta / ((alpha + beta) ** 2 * (alpha + beta + 1)) elif mean is not None and var is not None: self.mean = mean self.var = var self.alpha = mean ** 2 * (1 - mean) / var - mean self.beta = (1 - mean) ** 2 * mean / var - (1 - mean) else: raise ValueError( "BetaDistribution requires either (alpha and beta) " "or (mean and var)." ) self.dist = st.beta(a=self.alpha, b=self.beta) @property def n_rvs(self): return 1 def sample(self, n=1): return self.dist.rvs(size=n)[:, np.newaxis] class DirichletDistribution(Distribution): r""" The Dirichlet distribution, whose pdf at :math:`x` is proportional to :math:`\prod_i x_i^{\alpha_i-1}`. :param alpha: The list of concentration parameters. """ def __init__(self, alpha): self._alpha = np.array(alpha) if self.alpha.ndim != 1: raise ValueError('The input alpha must be a 1D list of concentration parameters.') self._dist = st.dirichlet(alpha=self.alpha) @property def alpha(self): return self._alpha @property def n_rvs(self): return self._alpha.size def sample(self, n=1): return self._dist.rvs(size=n) class BetaBinomialDistribution(Distribution): r""" The beta-binomial distribution, whose pmf at the non-negative integer :math:`k` is equal to :math:`\binom{n}{k}\frac{B(k+\alpha,n-k+\beta)}{B(\alpha,\beta)}` with :math:`B(\cdot,\cdot)` the beta function. This is the compound distribution whose variates are binomial distributed with a bias chosen from a beta distribution. Note that either ``alpha`` and ``beta``, or ``mean`` and ``var``, must be specified as inputs; either case uniquely determines the distribution. :param int n: The :math:`n` parameter of the beta-binomial distribution. :param float alpha: The alpha shape parameter of the beta-binomial distribution. :param float beta: The beta shape parameter of the beta-binomial distribution. :param float mean: The desired mean value of the beta-binomial distribution. :param float var: The desired variance of the beta-binomial distribution. """ def __init__(self, n, alpha=None, beta=None, mean=None, var=None): self.n = n if alpha is not None and beta is not None: self.alpha = alpha self.beta = beta self.mean = n * alpha / (alpha + beta) self.var = n * alpha * beta * (alpha + beta + n) / ((alpha + beta) ** 2 * (alpha + beta + 1)) elif mean is not None and var is not None: self.mean = mean self.var = var self.alpha = - mean * (var + mean **2 - n * mean) / (mean ** 2 + n * (var - mean)) self.beta = (n - mean) * (var + mean ** 2 - n * mean) / ((n - mean) * mean - n * var) else: raise ValueError("BetaBinomialDistribution requires either (alpha and beta) or (mean and var).") # Beta-binomial is a compound distribution, drawing binomial # RVs off of a beta-distributed bias. self._p_dist = st.beta(a=self.alpha, b=self.beta) @property def n_rvs(self): return 1 def sample(self, n=1): p_vals = self._p_dist.rvs(size=n)[:, np.newaxis] # numpy.random.binomial supports sampling using different p values, # whereas scipy does not. return np.random.binomial(self.n, p_vals) class GammaDistribution(Distribution): r""" The gamma distribution, whose pdf at :math:`x` is proportional to :math:`x^{\alpha-1}e^{-x\beta}`. Note that either alpha and beta, or mean and var, must be specified as inputs; either case uniquely determines the distribution. 
:param float alpha: The alpha shape parameter of the gamma distribution. :param float beta: The beta shape parameter of the gamma distribution. :param float mean: The desired mean value of the gamma distribution. :param float var: The desired variance of the gamma distribution. """ def __init__(self, alpha=None, beta=None, mean=None, var=None): if alpha is not None and beta is not None: self.alpha = alpha self.beta = beta self.mean = alpha / beta self.var = alpha / beta ** 2 elif mean is not None and var is not None: self.mean = mean self.var = var self.alpha = mean ** 2 / var self.beta = mean / var else: raise ValueError("GammaDistribution requires either (alpha and beta) or (mean and var).") # This is the distribution we want up to a scale factor of beta self._dist = st.gamma(self.alpha) @property def n_rvs(self): return 1 def sample(self, n=1): return self._dist.rvs(size=n)[:, np.newaxis] / self.beta class MVUniformDistribution(Distribution): r""" Uniform distribution over the rectangle :math:`[0,1]^{\text{dim}}` with the restriction that the vector must sum to 1. Equivalently, a uniform distribution over the ``dim-1`` simplex whose vertices are the canonical unit vectors of :math:`\mathbb{R}^\text{dim}`. :param int dim: Number of dimensions; ``n_rvs``. """ def __init__(self, dim = 6): warnings.warn( "This class has been deprecated, and may " "be renamed in future versions.", DeprecationWarning ) self._dim = dim @property def n_rvs(self): return self._dim def sample(self, n = 1): return np.random.mtrand.dirichlet(np.ones(self._dim),n) class DiscreteUniformDistribution(Distribution): """ Discrete uniform distribution over the integers between ``0`` and ``2**num_bits-1`` inclusive. :param int num_bits: non-negative integer specifying how big to make the interval. """ def __init__(self, num_bits): self._num_bits = num_bits @property def n_rvs(self): return 1 def sample(self, n=1): z = np.random.randint(2**self._num_bits,size=n) return z class HilbertSchmidtUniform(SingleSampleMixin, Distribution): """ Creates a new Hilbert-Schmidt uniform prior on state space of dimension ``dim``. See e.g. [Mez06]_ and [Mis12]_. :param int dim: Dimension of the state space. """ def __init__(self, dim=2): warnings.warn( "This class has been deprecated; please see " "qinfer.tomography.GinibreDistribution(rank=None).", DeprecationWarning ) self.dim = dim self.paulis1Q = np.array([[[1,0],[0,1]],[[1,0],[0,-1]],[[0,-1j],[1j,0]],[[0,1],[1,0]]]) self.paulis = self.make_Paulis(self.paulis1Q, 4) @property def n_rvs(self): return self.dim**2 - 1 def sample(self): #Generate random unitary (see e.g. 
        g = (np.random.randn(self.dim,self.dim) + 1j*np.random.randn(self.dim,self.dim))/np.sqrt(2.0)
        q,r = la.qr(g)
        d = np.diag(r)
        ph = d/np.abs(d)
        ph = np.diag(ph)
        U = np.dot(q,ph)

        # Generate random matrix
        z = np.random.randn(self.dim,self.dim) + 1j*np.random.randn(self.dim,self.dim)

        rho = np.dot(np.dot(np.identity(self.dim)+U,np.dot(z,z.conj().transpose())),np.identity(self.dim)+U.conj().transpose())
        rho = rho/np.trace(rho)

        x = np.zeros([self.n_rvs])
        for idx in range(self.n_rvs):
            x[idx] = np.real(np.trace(np.dot(rho,self.paulis[idx+1])))

        return x

    def make_Paulis(self, paulis, d):
        if d == self.dim*2:
            return paulis
        else:
            temp = np.zeros([d**2,d,d],dtype='complex128')
            for idx in range(temp.shape[0]):
                temp[idx,:] = np.kron(paulis[int(idx // d)], self.paulis1Q[idx % 4])
            return self.make_Paulis(temp, d*2)

class HaarUniform(SingleSampleMixin, Distribution):
    """
    Haar uniform distribution of pure states of dimension ``dim``,
    parameterized as coefficients of the Pauli basis.

    :param int dim: Dimension of the state space.

    .. note::
        This distribution presently only works for ``dim==2`` and
        the Pauli basis.
    """
    def __init__(self, dim=2):
        warnings.warn(
            "This class has been deprecated; please see "
            "qinfer.tomography.GinibreDistribution(rank=1).",
            DeprecationWarning
        )
        # TODO: add basis as an option
        self.dim = dim

    @property
    def n_rvs(self):
        return 3

    def _sample(self):
        # Generate random unitary (see e.g. http://arxiv.org/abs/math-ph/0609050v2)
        z = (np.random.randn(self.dim,self.dim) + 1j*np.random.randn(self.dim,self.dim))/np.sqrt(2.0)
        q,r = la.qr(z)
        d = np.diag(r)
        ph = d/np.abs(d)
        ph = np.diag(ph)
        U = np.dot(q,ph)

        # TODO: generalize this to general dimensions
        # Apply Haar random unitary to |0> state to get random pure state
        psi = np.dot(U,np.array([1,0]))
        z = np.real(np.dot(psi.conj(),np.dot(np.array([[1,0],[0,-1]]),psi)))
        y = np.real(np.dot(psi.conj(),np.dot(np.array([[0,-1j],[1j,0]]),psi)))
        x = np.real(np.dot(psi.conj(),np.dot(np.array([[0,1],[1,0]]),psi)))

        return np.array([x,y,z])

class GinibreUniform(SingleSampleMixin, Distribution):
    """
    Creates a prior on the state space of dimension ``dim`` according to the
    Ginibre ensemble with parameter ``k``. See e.g. [Mis12]_.

    :param int dim: Dimension of the state space.
    :param int k: Number of columns of the random Ginibre matrix, i.e. the
        maximum rank of the sampled states.
    """
    def __init__(self, dim=2, k=2):
        warnings.warn(
            "This class has been deprecated; please see "
            "qinfer.tomography.GinibreDistribution.",
            DeprecationWarning
        )
        self.dim = dim
        self.k = k

    @property
    def n_rvs(self):
        return 3

    def _sample(self):
        # Generate random matrix
        z = np.random.randn(self.dim,self.k) + 1j*np.random.randn(self.dim,self.k)
        rho = np.dot(z,z.conj().transpose())
        rho = rho/np.trace(rho)

        z = np.real(np.trace(np.dot(rho,np.array([[1,0],[0,-1]]))))
        y = np.real(np.trace(np.dot(rho,np.array([[0,-1j],[1j,0]]))))
        x = np.real(np.trace(np.dot(rho,np.array([[0,1],[1,0]]))))

        return np.array([x,y,z])

class PostselectedDistribution(Distribution):
    """
    Postselects a distribution based on validity within a given model.
    """
    # TODO: rewrite LiuWestResampler in terms of this and a
    #       new MixtureDistribution.
    def __init__(self, distribution, model, maxiters=100):
        self._dist = distribution
        self._model = model
        self._maxiters = maxiters

    @property
    def n_rvs(self):
        return self._dist.n_rvs

    def sample(self, n=1):
        """
        Returns one or more samples from this probability distribution.

        :param int n: Number of samples to return.
        :return numpy.ndarray: An array containing samples from the
            distribution of shape ``(n, d)``, where ``d`` is the number
            of random variables. 
""" samples = np.empty((n, self.n_rvs)) idxs_to_sample = np.arange(n) iters = 0 while idxs_to_sample.size and iters < self._maxiters: samples[idxs_to_sample] = self._dist.sample(len(idxs_to_sample)) idxs_to_sample = idxs_to_sample[np.nonzero(np.logical_not( self._model.are_models_valid(samples[idxs_to_sample, :]) ))[0]] iters += 1 if idxs_to_sample.size: raise RuntimeError("Did not successfully postselect within {} iterations.".format(self._maxiters)) return samples def grad_log_pdf(self, x): return self._dist.grad_log_pdf(x) class InterpolatedUnivariateDistribution(Distribution): """ Samples from a single-variable distribution specified by its PDF. The samples are drawn by first drawing uniform samples over the interval ``[0, 1]``, and then using an interpolation of the inverse-CDF corresponding to the given PDF to transform these samples into the desired distribution. :param callable pdf: Vectorized single-argument function that evaluates the PDF of the desired distribution. :param float compactification_scale: Scale of the compactified coordinates used to interpolate the given PDF. :param int n_interp_points: The number of points at which to sample the given PDF. """ def __init__(self, pdf, compactification_scale=1, n_interp_points=1500): self._pdf = pdf self._xs = u.compactspace(compactification_scale, n_interp_points) self._generate_interp() def _generate_interp(self): xs = self._xs pdfs = self._pdf(xs) norm_factor = np.trapz(pdfs, xs) self._cdfs = cumtrapz(pdfs / norm_factor, xs, initial=0) self._interp_inv_cdf = interp1d(self._cdfs, xs, bounds_error=False) @property def n_rvs(self): return 1 def sample(self, n=1): return self._interp_inv_cdf(np.random.random(n))[:, np.newaxis] class ConstrainedSumDistribution(Distribution): """ Samples from an underlying distribution and then enforces that all samples must sum to some given value by normalizing each sample. :param Distribution underlying_distribution: Underlying probability distribution. :param float desired_total: Desired sum of each sample. """ def __init__(self, underlying_distribution, desired_total=1): super(ConstrainedSumDistribution, self).__init__() self._ud = underlying_distribution self.desired_total = desired_total @property def underlying_distribution(self): return self._ud @property def n_rvs(self): return self.underlying_distribution.n_rvs def sample(self, n=1): s = self.underlying_distribution.sample(n) totals = np.sum(s, axis=1)[:,np.newaxis] return self.desired_total * np.sign(totals) * s / totals<|fim▁end|>
:param numpy.ndarray locations: Locations of each particle in array of shape ``(n_particles, n_modelparams)`` :rtype: :class:`numpy.ndarray`, shape ``(n_modelparams,)``.
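For reference, a minimal sketch of the mean/variance parameterization used by GammaDistribution in the row above, assuming only scipy is available; scipy's gamma takes a shape and a scale, so scale = 1/beta:

# Sketch: recover (alpha, beta) from (mean, var) as GammaDistribution does,
# then confirm the resulting scipy distribution has the requested moments.
import scipy.stats as st

mean, var = 3.0, 0.5
alpha, beta = mean ** 2 / var, mean / var
dist = st.gamma(alpha, scale=1.0 / beta)
print(dist.mean(), dist.var())  # -> 3.0 0.5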
<|file_name|>issue-38091-2.rs<|end_file_name|><|fim▁begin|>// build-fail //~^ ERROR overflow evaluating the requirement `i32: Check` #![feature(specialization)] //~^ WARN the feature `specialization` is incomplete trait Iterate<'a> { type Ty: Valid; fn iterate(self); } impl<'a, T> Iterate<'a> for T where<|fim▁hole|> default type Ty = (); default fn iterate(self) {} } trait Check {} impl<'a, T> Check for T where <T as Iterate<'a>>::Ty: Valid {} trait Valid {} impl Valid for () {} fn main() { Iterate::iterate(0); }<|fim▁end|>
T: Check, {
<|file_name|>StringUtils.java<|end_file_name|><|fim▁begin|>/*
 * Copyright 2016 Code Above Lab LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.codeabovelab.dm.common.utils;

import java.util.function.Function;
import java.util.function.IntPredicate;
import java.util.function.Supplier;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 */
public class StringUtils {
    private StringUtils() {
    }

    public static String before(String s, char c) {
        return beforeOr(s, c, () -> {
            // we throw an exception to preserve the old behavior
            throw new IllegalArgumentException("String '" + s + "' must contain '" + c + "'.");
        });
    }

    /**
     * Return the part of 's' before 'c'.
     * @param s string which may contain char 'c'
     * @param c char
     * @param ifNone supplier of value which is used when 'c' is not present in 's' (null not allowed)
     * @return part of 's' before 'c' or 'ifNone.get()'
     */
    public static String beforeOr(String s, char c, Supplier<String> ifNone) {
        int pos = s.indexOf(c);
        if(pos < 0) {
            return ifNone.get();
        }
        return s.substring(0, pos);
    }

    public static String after(String s, char c) {
        int pos = s.indexOf(c);
        if(pos < 0) {
            throw new IllegalArgumentException("String '" + s + "' must contain '" + c + "'.");
        }
        return s.substring(pos + 1);
    }

    public static String beforeLast(String s, char c) {
        int pos = s.lastIndexOf(c);
        if(pos < 0) {
            throw new IllegalArgumentException("String '" + s + "' must contain '" + c + "'.");
        }
        return s.substring(0, pos);
    }

    public static String afterLast(String s, char c) {
        int pos = s.lastIndexOf(c);
        if(pos < 0) {
            throw new IllegalArgumentException("String '" + s + "' must contain '" + c + "'.");
        }
        return s.substring(pos + 1);
    }

    /**
     * Split the string into two pieces at the last appearance of the delimiter.
     * @param s string
     * @param c delimiter
     * @return null if the string does not contain the delimiter
     */
    public static String[] splitLast(String s, char c) {
        int pos = s.lastIndexOf(c);
        if(pos < 0) {
            return null;
        }
        return new String[] {s.substring(0, pos), s.substring(pos + 1)};
    }

    /**
     * Split the string into two pieces at the last appearance of the delimiter.
     * @param s string
     * @param delimiter delimiter
     * @return null if the string does not contain the delimiter
     */
    public static String[] splitLast(String s, String delimiter) {
        int pos = s.lastIndexOf(delimiter);
        if(pos < 0) {
            return null;
        }
        return new String[] {s.substring(0, pos), s.substring(pos + delimiter.length())};
    }

    /**
     * Return a string which contains only chars for which charJudge gives true. 
     * @param src source string, may be null
     * @param charJudge predicate which consumes a codePoint (not chars)
     * @return string, null when the incoming string is null
     */
    public static String retain(String src, IntPredicate charJudge) {
        if (src == null) {
            return null;
        }
        final int length = src.length();
        StringBuilder sb = new StringBuilder(length);
        for (int i = 0; i < length; i++) {
            int cp = src.codePointAt(i);
            if(charJudge.test(cp)) {
                sb.appendCodePoint(cp);
            }
        }
        return sb.toString();
    }

    /**
     * Retain only characters which match {@link #isAz09(int)}.
     * @param src source string, may be null
     * @return string, null when the incoming string is null
     */
    public static String retainAz09(String src) {
        return retain(src, StringUtils::isAz09);
    }

    /**
     * Retain chars which are acceptable as a file name or part of a url on most operating systems. <p/>
     * It: <code>'A'-'z', '0'-'9', '_', '-', '.'</code>
     * @param src source string, may be null
     * @return string, null when the incoming string is null
     */
    public static String retainForFileName(String src) {
        return retain(src, StringUtils::isForFileName);
    }

    /**
     * Test that the specified codePoint is an ASCII letter or digit.
     * @param cp codePoint
     * @return true for the specified chars
     */
    public static boolean isAz09(int cp) {
        return cp >= '0' && cp <= '9' ||
          cp >= 'a' && cp <= 'z' ||
          cp >= 'A' && cp <= 'Z';
    }

    /**
     * Test that the specified codePoint is an ASCII letter, digit or hyphen '-'.
     * @param cp codePoint
     * @return true for the specified chars
     */
    public static boolean isAz09Hyp(int cp) {
        return isAz09(cp) || cp == '-';
    }

    /**
     * Test that the specified codePoint is an ASCII letter, digit or one of '-', '_', ':', '.'. <p/>
     * It is a common matcher that limits the alphabet acceptable for our system IDs.
     * @param cp codePoint
     * @return true for the specified chars
     */
    public static boolean isId(int cp) {
        return isAz09(cp) || cp == '-' || cp == '_' || cp == ':' || cp == '.';
    }

    public static boolean isHex(int cp) {
        return cp >= '0' && cp <= '9' ||<|fim▁hole|>
    }

    /**
     * Chars which are acceptable as a file name or part of a url on most operating systems. <p/>
     * It: <code>'A'-'z', '0'-'9', '_', '-', '.'</code>
     * @param cp codePoint
     * @return true for the specified chars
     */
    public static boolean isForFileName(int cp) {
        return isAz09(cp) || cp == '-' || cp == '_' || cp == '.';
    }

    /**
     * Invoke {@link Object#toString()} on the specified argument; if arg is null then return null.
     * @param o
     * @return null or result of o.toString()
     */
    public static String valueOf(Object o) {
        return o == null? null : o.toString();
    }

    /**
     * Test that each char of the specified string matches the predicate. <p/>
     * Note that this method does not support unicode, because it is usually applicable only for matching letters placed under code 128.
     * @param str string
     * @param predicate char matcher
     * @return true if all chars match
     */
    public static boolean match(String str, IntPredicate predicate) {
        final int len = str.length();
        if(len == 0) {
            return false;
        }
        for(int i = 0; i < len; i++) {
            if(!predicate.test(str.charAt(i))) {
                return false;
            }
        }
        return true;
    }

    /**
     * Is a <code>match(str, StringUtils::isAz09);</code>.
     * @param str string
     * @return true if the string matches [A-Za-z0-9]*
     */
    public static boolean matchAz09(String str) {
        return match(str, StringUtils::isAz09);
    }

    /**
     * Is a <code>match(str, StringUtils::isAz09Hyp);</code>.
     * @param str string
     * @return true if the string matches [A-Za-z0-9-]*
     */
    public static boolean matchAz09Hyp(String str) {
        return match(str, StringUtils::isAz09Hyp);
    }

    /**
     * Is a <code>match(str, StringUtils::isId);</code>. 
     * @param str string
     * @return true if the string matches [A-Za-z0-9-_:.]*
     */
    public static boolean matchId(String str) {
        return match(str, StringUtils::isId);
    }

    public static boolean matchHex(String str) {
        return match(str, StringUtils::isHex);
    }

    /**
     * Replace the string with a pattern, obtaining replacement values through the handler function. <p/>
     * Note that it differs from the usual Pattern behavior when processing group references in
     * replacements: this code does nothing with the replacement.
     * @param pattern pattern
     * @param src source string
     * @param handler function which takes the matched part of the source string and returns the replacement value, must never return null
     * @return result string
     */
    public static String replace(Pattern pattern, String src, Function<String, String> handler) {
        StringBuilder sb = null;
        Matcher matcher = pattern.matcher(src);
        int pos = 0;
        while(matcher.find()) {
            if(sb == null) {
                // replacement can be a very rare operation, and we do not need an excess string buffer
                sb = new StringBuilder();
            }
            String expr = matcher.group();
            String replacement = handler.apply(expr);
            sb.append(src, pos, matcher.start());
            sb.append(replacement);
            pos = matcher.end();
        }
        if(sb == null) {
            return src;
        }
        sb.append(src, pos, src.length());
        return sb.toString();
    }
}<|fim▁end|>
cp >= 'a' && cp <= 'f' || cp >= 'A' && cp <= 'F';
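A rough Python analogue of the retain/isHex predicate pattern completed above (illustrative only; the names here are not taken from the Java sources):

def retain(src, char_judge):
    # Keep only the code points accepted by the predicate, as retain() does.
    if src is None:
        return None
    return ''.join(ch for ch in src if char_judge(ord(ch)))

def is_hex(cp):
    return (ord('0') <= cp <= ord('9')
            or ord('a') <= cp <= ord('f')
            or ord('A') <= cp <= ord('F'))

print(retain("0xCAFE-babe!", is_hex))  # -> 0CAFEbabe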
<|file_name|>node-console.js<|end_file_name|><|fim▁begin|>require('./node')<|fim▁hole|><|fim▁end|>
require('./console')
<|file_name|>user.js<|end_file_name|><|fim▁begin|>import Ember from 'ember'; import ApplicationSerializer from 'ghost-admin/serializers/application'; import EmbeddedRecordsMixin from 'ember-data/serializers/embedded-records-mixin'; const {String: {pluralize}} = Ember; export default ApplicationSerializer.extend(EmbeddedRecordsMixin, { attrs: { roles: {embedded: 'always'},<|fim▁hole|> extractSingle(store, primaryType, payload) { let root = this.keyForAttribute(primaryType.modelName); let pluralizedRoot = pluralize(primaryType.modelName); payload[root] = payload[pluralizedRoot][0]; delete payload[pluralizedRoot]; return this._super(...arguments); }, normalizeSingleResponse(store, primaryModelClass, payload) { let root = this.keyForAttribute(primaryModelClass.modelName); let pluralizedRoot = pluralize(primaryModelClass.modelName); if (payload[pluralizedRoot]) { payload[root] = payload[pluralizedRoot][0]; delete payload[pluralizedRoot]; } return this._super(...arguments); } });<|fim▁end|>
lastLoginUTC: {key: 'last_login'}, createdAtUTC: {key: 'created_at'}, updatedAtUTC: {key: 'updated_at'} },
<|file_name|>svg.js<|end_file_name|><|fim▁begin|>/* app/ui/map/svg */ define( [ 'jquery', 'raphael', 'app/ui/map/data', 'app/ui/map/util', 'util/detector', 'pubsub' ], function ($, Raphael, MapData, MapUtil, Detector) { 'use strict'; var SVG; return { nzte: {}, markers: [], exports: {}, countryText: {}, sets: {}, continentSets: {}, text: {}, raphael: null, _$container: null, _isExportsMap: false, init: function () { SVG = this; this._$container = $('.js-map-container'); this._isExportsMap = $('#js-map-nzte').length ? false : true; //If already setup just show the map again if (this._$container.is('.is-loaded')) { this._$container.show(); return; } if (this._isExportsMap) { this._initInteractiveMap(); return; } this._initRegularMap(); }, _initInteractiveMap: function () { this._setUpMap(); this._drawMap(); this._createContinentSets(); this._initInteractiveMapEvents(); this._setLoaded(); this._hideLoader(); }, _initRegularMap: function () { this._setUpMap(); this._drawMap(); this._createSets(); this._initRegularMapEvents(); this._setLoaded(); this._hideLoader(); }, _setLoaded: function () { this._$container.addClass('is-loaded'); }, _hideLoader: function () { $.publish('/loader/hide'); }, _setUpMap: function () { var id = this._isExportsMap ? 'js-map-exports' : 'js-map-nzte'; this._$container.show(); this.raphael = Raphael(id, '100%', '100%'); this.raphael.setViewBox(0, 0, 841, 407, true); this.raphael.canvas.setAttribute('preserveAspectRatio', 'xMinYMin meet'); }, _drawMap: function () { this._addMainMap(); this._addContinentMarkers(); this._addContinentMarkerText(); if (this._isExportsMap) { this._addCountryMarkers(); } }, _addMainMap: function () { var mainAttr = { stroke: 'none', fill: '#dededd', 'fill-rule': 'evenodd', 'stroke-linejoin': 'round' }; this.nzte.main = this.raphael.path(MapData.main).attr(mainAttr); }, _addContinentMarkers: function () { var markerAttr = { stroke: 'none', fill: '#f79432', 'stroke-linejoin': 'round', cursor: 'pointer' }; var markerPaths = MapData.markers[0]; for (var continent in markerPaths) { if (!this._isExportsMap || this._isExportsMap && continent !== 'new-zealand') { this.markers[continent] = this.raphael.path(markerPaths[continent]).attr(markerAttr); } } }, _addContinentMarkerText: function () { var textAttr = { stroke: 'none', fill: '#ffffff', 'fill-rule': 'evenodd', 'stroke-linejoin': 'round', cursor: 'pointer' }; var textPaths = MapData.text[0]; for (var continent in textPaths) { if (!this._isExportsMap || this._isExportsMap && continent !== 'new-zealand') { this.text[continent] = this.raphael.path(textPaths[continent]).attr(textAttr); } } }, _addCountryMarkers: function () { var marker; for (var region in this.markers) { marker = this.markers[region]; this._createHoverBox(region, marker); } }, _createHoverBox: function (region, marker) { var set; var markerAttr = { stroke: 'none', fill: '#116697', opacity: 0, 'stroke-linejoin': 'round' }; var markerPaths = MapData.exportsText[0]; var country = markerPaths[region]; if (!country) { return; } var countryText = markerPaths[region][0]; var numberOfCountries = Object.keys(countryText).length; var markerBox = marker.getBBox(); var topX = markerBox.x; var bottomY = markerBox.y2; var width = region !== 'india-middle-east-and-africa' ? 
150 : 200; //Add the rectangle this.exports[region] = this.raphael.rect(topX + 28, bottomY - 1, width, (21 * numberOfCountries) + 5).toBack().attr(markerAttr); //Create a set to combine countries, hover box and region icon/text set = this.raphael.set(); set.push( this.exports[region] ); //Add the country Text this._addCountryText(markerBox, countryText, topX + 28, bottomY - 1, 21, region, set); }, _addCountryText: function (markerBox, countryText, x, y, height, region, set) { var updatedX = x + 10; var updatedY = y + 10; var textAttr = { font: '13px Arial', textAlign: 'left', fill: "#ffffff", cursor: 'pointer', 'text-decoration': 'underline', 'text-anchor': 'start', opacity: 0 }; for (var country in countryText) { var text = countryText[country].toUpperCase(); this.countryText[country] = this.raphael.text(updatedX, updatedY, text).toBack().attr(textAttr); updatedY += height; set.push( this.countryText[country] ); } this.continentSets[region] = set.hide(); }, _createSets: function () { for (var region in this.markers) { var set = this.raphael.set(); set.push( this.markers[region], this.text[region] ); this.sets[region] = set; } }, _createContinentSets: function () { for (var region in this.markers) { var set = this.raphael.set(); set.push( this.markers[region], this.text[region], this.continentSets[region] ); this.sets[region] = set; } }, _initInteractiveMapEvents: function () { this._initCountryTextEvents(); this._initCountryHoverEvents(); }, _initRegularMapEvents: function () { var bounceEasing = 'cubic-bezier(0.680, -0.550, 0.265, 1.550)'; var mouseOverMarkerBounce = { transform: 's1.1' }; var mouseOverMarkerBounceWithTranslate = { transform: 's1.1t5,0' }; var mouseOutMarkerBounce = { transform: 's1' }; var mouseOverMarker = { fill: '#116697' }; var mouseOutMarker = { fill: '#f79432' }; for (var region in this.sets) { var set = this.sets[region]; var marker = this.markers[region]; var text = this.text[region]; (function (savedSet, savedRegion, savedMarker, savedText) { savedSet.attr({ cursor: 'pointer' }); savedSet.hover(function () { //A slight translation is needed for 'india-middle-east-and-africa' so when hovered it isn't clipped by container var transformAttr = savedRegion !== 'india-middle-east-and-africa' ? 
mouseOverMarkerBounce : mouseOverMarkerBounceWithTranslate; savedMarker.animate(transformAttr, 250, bounceEasing); savedMarker.animate(mouseOverMarker, 250, 'easeInOutExpo'); savedText.animate(transformAttr, 250, bounceEasing); }, function () { savedMarker.animate(mouseOutMarkerBounce, 250, bounceEasing); savedMarker.animate(mouseOutMarker, 250, 'easeInOutSine'); savedText.animate(mouseOutMarkerBounce, 250, bounceEasing); }); savedSet.click(function () { MapUtil.goToUrl(savedRegion, false); }); })(set, region, marker, text); } }, _initCountryHoverEvents: function () { var noHover = Detector.noSvgHover(); var mouseOverMarker = { fill: '#116697' }; var mouseOutMarker = { fill: '#f79432' }; for (var region in this.sets) { var set = this.sets[region]; var continentSet = this.continentSets[region]; var marker = this.markers[region]; var text = this.text[region]; var hoverBox = this.exports[region]; (function (savedSet, savedContinentSet, savedRegion, savedMarker, savedText, savedBox) { savedSet.attr({ cursor: 'pointer' }); if (noHover) { savedMarker.data('open', false); savedSet.click(function () { if (savedMarker.data('open') === false) { SVG._closeAllContinents(); savedMarker.data('open', true); savedMarker.animate(mouseOverMarker, 250, 'easeInOutExpo'); savedContinentSet.show().toFront().animate({ opacity: 1 }, 250, 'easeInOutExpo'); } else { savedMarker.data('open', false); savedMarker.animate(mouseOutMarker, 250, 'easeInOutSine'); savedContinentSet.animate({ opacity: 0 }, 250, 'easeInOutSine').hide().toBack(); } }); savedSet.hover(function () { savedMarker.animate(mouseOverMarker, 250, 'easeInOutExpo'); }, function () { savedMarker.data('open') === false && savedMarker.animate(mouseOutMarker, 250, 'easeInOutSine'); }); } else { savedSet.hover(function () { savedMarker.animate(mouseOverMarker, 250, 'easeInOutExpo'); savedContinentSet.show().toFront().animate({ opacity: 1 }, 250, 'easeInOutExpo'); }, function () { savedMarker.animate(mouseOutMarker, 250, 'easeInOutSine'); savedContinentSet.animate({ opacity: 0 }, 250, 'easeInOutSine').hide().toBack(); }); } })(set, continentSet, region, marker, text, hoverBox); } }, _closeAllContinents: function () { for (var region in this.sets) { var continentSet = this.continentSets[region]; var marker = this.markers[region]; var mouseOutMarker = { fill: '#f79432' }; marker.data('open', false); marker.animate(mouseOutMarker, 250, 'easeInOutSine');<|fim▁hole|> _initCountryTextEvents: function () { for (var country in this.countryText) { var text = this.countryText[country]; (function (savedText, savedCountry) { savedText.click(function () { MapUtil.goToUrl(savedCountry, true); }); savedText.hover(function () { savedText.animate({ 'fill-opacity': 0.6 }, 250, 'easeInOutSine'); }, function () { savedText.animate({ 'fill-opacity': 1 }, 250, 'easeInOutSine'); }); })(text, country); } } }; } );<|fim▁end|>
continentSet.animate({ opacity: 0 }, 250, 'easeInOutSine').hide().toBack(); } },
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
from __future__ import absolute_import
<|file_name|>utils.py<|end_file_name|><|fim▁begin|># Copyright (C) 2014 Andrey Antukh <[email protected]>
# Copyright (C) 2014 Jesús Espino <[email protected]>
# Copyright (C) 2014 David Barragán <[email protected]>
# Copyright (C) 2014 Anler Hernández <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from django.apps import apps

from .choices import NotifyLevel
from taiga.base.utils.text import strip_lines


def attach_watchers_to_queryset(queryset, as_field="watchers"):
    """Attach watching user ids to each object of the queryset.

    :param queryset: A Django queryset object.
    :param as_field: Attach the watchers as an attribute with this name.

    :return: Queryset object with the additional `as_field` field.
    """
    model = queryset.model
    type = apps.get_model("contenttypes", "ContentType").objects.get_for_model(model)
    sql = ("""SELECT array(SELECT user_id
                             FROM notifications_watched
                            WHERE notifications_watched.content_type_id = {type_id}
                              AND notifications_watched.object_id = {tbl}.id)""")
    sql = sql.format(type_id=type.id, tbl=model._meta.db_table)
    qs = queryset.extra(select={as_field: sql})
    return qs


def attach_is_watcher_to_queryset(queryset, user, as_field="is_watcher"):
    """Attach is_watcher boolean to each object of the queryset.

    :param user: A users.User object model
    :param queryset: A Django queryset object.
    :param as_field: Attach the boolean as an attribute with this name.

    :return: Queryset object with the additional `as_field` field.
    """
    model = queryset.model
    type = apps.get_model("contenttypes", "ContentType").objects.get_for_model(model)
    sql = ("""SELECT CASE WHEN (SELECT count(*)
                                  FROM notifications_watched
                                 WHERE notifications_watched.content_type_id = {type_id}
                                   AND notifications_watched.object_id = {tbl}.id
                                   AND notifications_watched.user_id = {user_id}) > 0
                          THEN TRUE
                          ELSE FALSE
                     END""")
    sql = sql.format(type_id=type.id, tbl=model._meta.db_table, user_id=user.id)
    qs = queryset.extra(select={as_field: sql})
    return qs


def attach_total_watchers_to_queryset(queryset, as_field="total_watchers"):
    """Attach the total watchers count to each object of the queryset.

    :param queryset: A Django queryset object.
    :param as_field: Attach the count as an attribute with this name.

    :return: Queryset object with the additional `as_field` field.
    """
    model = queryset.model
    type = apps.get_model("contenttypes", "ContentType").objects.get_for_model(model)
    sql = ("""SELECT count(*)
                FROM notifications_watched<|fim▁hole|>
               WHERE notifications_watched.content_type_id = {type_id}
                 AND notifications_watched.object_id = {tbl}.id""")
    sql = sql.format(type_id=type.id, tbl=model._meta.db_table)
    qs = queryset.extra(select={as_field: sql})
    return qs


def attach_project_is_watcher_to_queryset(queryset, user, as_field="is_watcher"):
    """Attach is_watcher boolean to each object of the projects queryset. 
    :param user: A users.User object model
    :param queryset: A Django projects queryset object.
    :param as_field: Attach the boolean as an attribute with this name.

    :return: Queryset object with the additional `as_field` field.
    """
    model = queryset.model
    type = apps.get_model("contenttypes", "ContentType").objects.get_for_model(model)
    sql = ("""SELECT CASE WHEN (SELECT count(*)
                                  FROM notifications_notifypolicy
                                 WHERE notifications_notifypolicy.project_id = {tbl}.id
                                   AND notifications_notifypolicy.user_id = {user_id}
                                   AND notifications_notifypolicy.notify_level != {ignore_notify_level}) > 0
                          THEN TRUE
                          ELSE FALSE
                     END""")
    sql = sql.format(tbl=model._meta.db_table, user_id=user.id, ignore_notify_level=NotifyLevel.ignore)
    qs = queryset.extra(select={as_field: sql})
    return qs


def attach_project_total_watchers_attrs_to_queryset(queryset, as_field="total_watchers"):
    """Attach the number of watching users to each project of the queryset.

    :param queryset: A Django projects queryset object.
    :param as_field: Attach the count as an attribute with this name.

    :return: Queryset object with the additional `as_field` field.
    """
    model = queryset.model
    type = apps.get_model("contenttypes", "ContentType").objects.get_for_model(model)
    sql = ("""SELECT count(user_id)
                FROM notifications_notifypolicy
               WHERE notifications_notifypolicy.project_id = {tbl}.id
                 AND notifications_notifypolicy.notify_level != {ignore_notify_level}""")
    sql = sql.format(tbl=model._meta.db_table, ignore_notify_level=NotifyLevel.ignore)
    qs = queryset.extra(select={as_field: sql})
    return qs


def attach_notify_level_to_project_queryset(queryset, user):
    """
    Function that attaches a "notify_level" attribute to each queryset result,
    querying the notification level of the current user for each project in
    the most efficient way.

    :param queryset: A Django queryset object.
    :param user: A User model object.

    :return: Queryset object with the additional `as_field` field.
    """
    user_id = getattr(user, "id", None) or "NULL"
    default_level = NotifyLevel.notwatch
    sql = strip_lines("""
    COALESCE((SELECT notifications_notifypolicy.notify_level
                FROM notifications_notifypolicy
               WHERE notifications_notifypolicy.project_id = projects_project.id
                 AND notifications_notifypolicy.user_id = {user_id}),
             {default_level})
    """)
    sql = sql.format(user_id=user_id, default_level=default_level)
    return queryset.extra(select={"notify_level": sql})<|fim▁end|>
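The helpers above all inject a correlated subquery through queryset.extra(select=...); a self-contained sketch of that SQL pattern using only the standard library (the table and column names here are illustrative, not taiga's real schema):

import sqlite3

con = sqlite3.connect(":memory:")
con.executescript("""
    CREATE TABLE project (id INTEGER PRIMARY KEY, name TEXT);
    CREATE TABLE watched (object_id INTEGER, user_id INTEGER);
    INSERT INTO project VALUES (1, 'alpha'), (2, 'beta');
    INSERT INTO watched VALUES (1, 10), (1, 11), (2, 10);
""")
rows = con.execute("""
    SELECT p.id, p.name,
           (SELECT count(*) FROM watched w WHERE w.object_id = p.id)
               AS total_watchers
    FROM project p
""").fetchall()
print(rows)  # -> [(1, 'alpha', 2), (2, 'beta', 1)]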
<|file_name|>packaging.py<|end_file_name|><|fim▁begin|>import os.path import subprocess import pkg_resources import setuptools # pylint: disable=unused-import def get_package_revision(package_name): # type: (str) -> str """Determine the Git commit hash for the Shopify package. If the package is installed in "develop" mode the SHA is retrieved using Git. Otherwise it will be retrieved from the package's Egg metadata. Returns an empty string if the package is not installed or does not contain revision information. """ egg_info = pkg_resources.working_set.find(pkg_resources.Requirement.parse(package_name)) if egg_info is None: return '' if os.path.exists(os.path.join(egg_info.location, '.git')): return str(subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=egg_info.location).decode()).strip() if egg_info.has_metadata('git_sha.txt'):<|fim▁hole|> def write_package_revision(cmd, _, filename): # type: (setuptools.Command, str, str) -> None """Write the Git commit hash for the package that is currently being built. If the build is not occurring from a Git checkout the current revision must be stored in a text file named "REVISION". This function should not be called except via setuptools, by specifying an 'egg_info.writers' entrypoint as follows: setuptools.setup( name='test_packaging', ... install_requires=[ 'shopify_python' ], ... entry_points={ 'egg_info.writers': [ 'git_sha.txt = shopify_python.packaging:write_package_revision', ], } ... ) """ git_sha = None if os.path.exists('.git'): git_sha = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip() elif os.path.exists('REVISION'): with open('REVISION') as revision_file: git_sha = revision_file.read().strip() if git_sha is not None: cmd.write_or_delete_file("Git SHA", filename, git_sha)<|fim▁end|>
return egg_info.get_metadata('git_sha.txt') return ''
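A hypothetical usage sketch for the helper completed above; the module path matches the entry point declared in the file's own docstring, but the package name passed in is illustrative:

from shopify_python.packaging import get_package_revision

# An empty string means the package is absent or carries no revision metadata.
sha = get_package_revision('shopify_python')
print(sha or '<no revision information>')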
<|file_name|>ResultSet.java<|end_file_name|><|fim▁begin|>package com.planet_ink.fakedb; import java.io.InputStream; import java.io.Reader; import java.sql.NClob; import java.sql.RowId; import java.sql.SQLException; import java.sql.SQLXML; import java.util.Map; /* Copyright 2001 Thomas Neumann Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ @SuppressWarnings("unchecked") class ResultSet implements java.sql.ResultSet { private Statement statement; private Backend.Relation relation; private java.util.Iterator iter; private int currentRow=0; private int conditionIndex; private String conditionValue; private boolean eq=true; private boolean lt=false; private boolean gt=false; private final String[] values; private final boolean[] nullIndicators; private boolean nullFlag = false; ResultSet(Statement s, Backend.Relation r, int ci, String cv, String comp) { statement=s; relation=r; conditionIndex=ci; conditionValue=cv; comp=comp.trim(); eq=(comp.indexOf("=")>=0); lt=(comp.indexOf("<")>=0); gt=(comp.indexOf(">")>=0) ; currentRow=0; values=new String[r.attributes.length]; nullIndicators=new boolean[values.length]; if ((ci<0)&&(cv!=null)) { iter=r.index.keySet().iterator(); } else { iter=r.index.values().iterator(); } } public java.sql.Statement getStatement() throws java.sql.SQLException { return statement; } public static boolean isNumber(String s) { if(s==null) return false; s=s.trim(); if(s.length()==0) return false; if((s.length()>1)&&(s.startsWith("-"))) s=s.substring(1); for(int i=0;i<s.length();i++) if("0123456789.,".indexOf(s.charAt(i))<0) return false; return true; } public static double s_double(String DOUBLE) { double sdouble=0; try{ sdouble=Double.parseDouble(DOUBLE); } catch(Exception e){ return 0;} return sdouble; } public static long s_long(String LONG) { long slong=0; try{ slong=Long.parseLong(LONG); } catch(Exception e){ return 0;} return slong; } public static boolean isDouble(String DBL) { if(DBL.length()==0) return false; if(DBL.startsWith("-")&&(DBL.length()>1)) DBL=DBL.substring(1); boolean alreadyDot=false; for(int i=0;i<DBL.length();i++) if(!Character.isDigit(DBL.charAt(i))) { if(DBL.charAt(i)=='.') { if(alreadyDot) return false; alreadyDot=true; } else return false; } return alreadyDot; } public int numCompare(String s1, String s2) { if((s1==null)||(s2==null)) return 0; <|fim▁hole|> double d2=isDouble(s2)?s_double(s2):Long.valueOf(s_long(s2)).doubleValue(); if(d1==d2) return 0; if(d1>d2) return 1; return -1; } long l1=s_long(s1); long l2=s_long(s2); if(l1==l2) return 0; if(l1>l2) return 1; return -1; } public boolean next() throws java.sql.SQLException { while (true) { if (!iter.hasNext()) return false; if ((conditionIndex<0)&&(conditionValue!=null)) { String key=(String)iter.next(); String subKey=key; int x=subKey.indexOf("\n"); if(x>0)subKey=subKey.substring(0,x); int nc=(lt||gt)?numCompare(subKey,conditionValue):0; int sc=(lt||gt)?subKey.compareTo(conditionValue):0; if(((eq)&&(subKey.equals(conditionValue))) ||((eq)&&(key.startsWith(conditionValue+"\n"))) ||((lt)&&(nc<0)) ||((gt)&&(nc>0)) ||((lt)&&(sc<0)) 
||((gt)&&(sc>0))) { currentRow++; return relation.getRecord(nullIndicators,values,(Backend.RecordInfo)relation.index.get(key)); } continue; } if (!relation.getRecord(nullIndicators,values,(Backend.RecordInfo)iter.next())) return false; if (conditionIndex>=0) { if (nullIndicators[conditionIndex]) continue; String subKey=values[conditionIndex]; int nc=(lt||gt)?numCompare(subKey,conditionValue):0; int sc=(lt||gt)?subKey.compareTo(conditionValue):0; if(!(((eq)&&(subKey.equals(conditionValue))) ||((lt)&&(nc<0)) ||((gt)&&(nc>0)) ||((lt)&&(sc<0)) ||((gt)&&(sc>0)))) continue; } currentRow++; return true; } } public void close() throws java.sql.SQLException { } public boolean wasNull() throws java.sql.SQLException { return nullFlag; } public String getString(int columnIndex) throws java.sql.SQLException { if ((columnIndex<0)||(columnIndex>=nullIndicators.length)||(nullIndicators[columnIndex])) { nullFlag=true; return null; } nullFlag=false; return values[columnIndex]; } public java.sql.Array getArray(int columnIndex) throws java.sql.SQLException { //String s=getString(columnIndex); if (nullFlag) return null; throw new java.sql.SQLException(); } public java.sql.Blob getBlob(int columnIndex) throws java.sql.SQLException { //String s=getString(columnIndex); if (nullFlag) return null; throw new java.sql.SQLException(); } public java.sql.Clob getClob(int columnIndex) throws java.sql.SQLException { //String s=getString(columnIndex); if (nullFlag) return null; throw new java.sql.SQLException(); } public java.sql.Ref getRef(int columnIndex) throws java.sql.SQLException { //String s=getString(columnIndex); if (nullFlag) return null; throw new java.sql.SQLException(); } public boolean getBoolean(int columnIndex) throws java.sql.SQLException { String s=getString(columnIndex); if ((s!=null)&&(s.length()>0)) switch (Character.toUpperCase(s.charAt(0))) { case 'T': case 'Y': case '1': return true; } return false; } public byte getByte(int columnIndex) throws java.sql.SQLException { String s=getString(columnIndex); if (nullFlag) return 0; try { return Byte.parseByte(s); } catch (NumberFormatException e) { throw new java.sql.SQLException(e.getMessage()); } } public short getShort(int columnIndex) throws java.sql.SQLException { String s=getString(columnIndex); if (nullFlag) return 0; try { return Short.parseShort(s); } catch (NumberFormatException e) { throw new java.sql.SQLException(e.getMessage()); } } public int getInt(int columnIndex) throws java.sql.SQLException { String s=getString(columnIndex); if (nullFlag) return 0; try { return Integer.parseInt(s); } catch (NumberFormatException e) { throw new java.sql.SQLException(e.getMessage()); } } public long getLong(int columnIndex) throws java.sql.SQLException { String s=getString(columnIndex); if (nullFlag) return 0; try { return Long.parseLong(s); } catch (NumberFormatException e) { throw new java.sql.SQLException(e.getMessage()); } } public float getFloat(int columnIndex) throws java.sql.SQLException { String s=getString(columnIndex); if (nullFlag) return 0; try { return Float.parseFloat(s); } catch (NumberFormatException e) { throw new java.sql.SQLException(e.getMessage()); } } public double getDouble(int columnIndex) throws java.sql.SQLException { String s=getString(columnIndex); if (nullFlag) return 0; try { return Double.parseDouble(s); } catch (NumberFormatException e) { throw new java.sql.SQLException(e.getMessage()); } } public java.math.BigDecimal getBigDecimal(int columnIndex) throws java.sql.SQLException { String s=getString(columnIndex); if (nullFlag) 
return new java.math.BigDecimal(0); try { return new java.math.BigDecimal(s); } catch (NumberFormatException e) { throw new java.sql.SQLException(e.getMessage()); } } /** * @deprecated */ public java.math.BigDecimal getBigDecimal(int columnIndex, int scale) throws java.sql.SQLException { String s=getString(columnIndex); if (nullFlag) { java.math.BigDecimal v=new java.math.BigDecimal(0); v.setScale(scale); return v; } try { java.math.BigDecimal v=new java.math.BigDecimal(s); v.setScale(scale); return v; } catch (NumberFormatException e) { throw new java.sql.SQLException(e.getMessage()); } } public byte[] getBytes(int columnIndex) throws java.sql.SQLException { String s=getString(columnIndex); if (nullFlag) return null; try { return s.getBytes(); } catch (NumberFormatException e) { throw new java.sql.SQLException(e.getMessage()); } } public java.sql.Date getDate(int columnIndex) throws java.sql.SQLException { String s=getString(columnIndex); if (nullFlag) return null; try { return java.sql.Date.valueOf(s); } catch (NumberFormatException e) { throw new java.sql.SQLException(e.getMessage()); } } public java.sql.Time getTime(int columnIndex) throws java.sql.SQLException { String s=getString(columnIndex); if (nullFlag) return null; try { return java.sql.Time.valueOf(s); } catch (NumberFormatException e) { throw new java.sql.SQLException(e.getMessage()); } } public java.sql.Timestamp getTimestamp(int columnIndex) throws java.sql.SQLException { String s=getString(columnIndex); if (nullFlag) return null; try { return java.sql.Timestamp.valueOf(s); } catch (NumberFormatException e) { throw new java.sql.SQLException(e.getMessage()); } } public java.io.InputStream getAsciiStream(int columnIndex) throws java.sql.SQLException { return getBinaryStream(columnIndex); } /** * @deprecated */ public java.io.InputStream getUnicodeStream(int columnIndex) throws java.sql.SQLException { return getBinaryStream(columnIndex); } public java.io.InputStream getBinaryStream(int columnIndex) throws java.sql.SQLException { byte b[] = getBytes(columnIndex); if (nullFlag) return null; return new java.io.ByteArrayInputStream(b); } public java.io.Reader getCharacterStream(int columnIndex) throws java.sql.SQLException { String s=getString(columnIndex); if (nullFlag) return null; return new java.io.CharArrayReader(s.toCharArray()); } public Object getObject(int columnIndex) throws java.sql.SQLException { return getString(columnIndex); } public java.net.URL getURL(int columnIndex) throws java.sql.SQLException { String s=getString(columnIndex); if (nullFlag) return null; try { return new java.net.URL(s); } catch (java.net.MalformedURLException e) { throw new java.sql.SQLException(e.getMessage()); } } public int findColumn(String columnName) throws java.sql.SQLException { return relation.findAttribute(columnName); } public String getString(String columnName) throws java.sql.SQLException { return getString(findColumn(columnName)); } public java.sql.Array getArray(String columnName) throws java.sql.SQLException { return getArray(findColumn(columnName)); } public java.sql.Blob getBlob(String columnName) throws java.sql.SQLException { return getBlob(findColumn(columnName)); } public java.sql.Clob getClob(String columnName) throws java.sql.SQLException { return getClob(findColumn(columnName)); } public java.sql.Ref getRef(String columnName) throws java.sql.SQLException { return getRef(findColumn(columnName)); } public boolean getBoolean(String columnName) throws java.sql.SQLException { return getBoolean(findColumn(columnName)); } 
public byte getByte(String columnName) throws java.sql.SQLException { return getByte(findColumn(columnName)); } public short getShort(String columnName) throws java.sql.SQLException { return getShort(findColumn(columnName)); } public int getInt(String columnName) throws java.sql.SQLException { return getInt(findColumn(columnName)); } public long getLong(String columnName) throws java.sql.SQLException { return getLong(findColumn(columnName)); } public float getFloat(String columnName) throws java.sql.SQLException { return getFloat(findColumn(columnName)); } public double getDouble(String columnName) throws java.sql.SQLException { return getDouble(findColumn(columnName)); } public java.math.BigDecimal getBigDecimal(String columnName) throws java.sql.SQLException { return getBigDecimal(findColumn(columnName)); } /** * @deprecated */ public java.math.BigDecimal getBigDecimal(String columnName, int scale) throws java.sql.SQLException { return getBigDecimal(findColumn(columnName), scale); } public byte[] getBytes(String columnName) throws java.sql.SQLException { return getBytes(findColumn(columnName)); } public java.sql.Date getDate(String columnName) throws java.sql.SQLException { return getDate(findColumn(columnName)); } public java.sql.Date getDate(int columnName,java.util.Calendar c) throws java.sql.SQLException { return getDate(columnName); } public java.sql.Date getDate(String columnName,java.util.Calendar c) throws java.sql.SQLException { return getDate(findColumn(columnName)); } public java.sql.Time getTime(String columnName) throws java.sql.SQLException { return getTime(findColumn(columnName)); } public java.sql.Time getTime(int columnName,java.util.Calendar c) throws java.sql.SQLException { return getTime(columnName); } public java.sql.Time getTime(String columnName,java.util.Calendar c) throws java.sql.SQLException { return getTime(findColumn(columnName)); } public java.sql.Timestamp getTimestamp(String columnName) throws java.sql.SQLException { return getTimestamp(findColumn(columnName)); } public java.sql.Timestamp getTimestamp(int columnName,java.util.Calendar c) throws java.sql.SQLException { return getTimestamp(columnName); } public java.sql.Timestamp getTimestamp(String columnName,java.util.Calendar c) throws java.sql.SQLException { return getTimestamp(findColumn(columnName)); } public java.io.Reader getCharacterStream(String columnName) throws java.sql.SQLException { return getCharacterStream(findColumn(columnName)); } public java.io.InputStream getAsciiStream(String columnName) throws java.sql.SQLException { return getAsciiStream(findColumn(columnName)); } /** * @deprecated */ public java.io.InputStream getUnicodeStream(String columnName) throws java.sql.SQLException { return getUnicodeStream(findColumn(columnName)); } public java.io.InputStream getBinaryStream(String columnName) throws java.sql.SQLException { return getBinaryStream(findColumn(columnName)); } public java.net.URL getURL(String columnName) throws java.sql.SQLException { return getURL(findColumn(columnName)); } public Object getObject(String columnName) throws java.sql.SQLException { return getObject(findColumn(columnName)); } public java.sql.SQLWarning getWarnings() throws java.sql.SQLException { return null; } public void clearWarnings() throws java.sql.SQLException { } public String getCursorName() throws java.sql.SQLException { throw new java.sql.SQLException("Positioned Update not supported.", "S1C00"); } public java.sql.ResultSetMetaData getMetaData() throws java.sql.SQLException { return null; } public 
void updateArray(int columnIndex,java.sql.Array x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateArray(String columnName,java.sql.Array x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateAsciiStream(int columnIndex,java.io.InputStream x,int length) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateAsciiStream(String columnName,java.io.InputStream x, int length) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateBigDecimal(int columnIndex,java.math.BigDecimal x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateBigDecimal(String columnName,java.math.BigDecimal x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateBinaryStream(int columnIndex,java.io.InputStream x,int length) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateBinaryStream(String columnName,java.io.InputStream x, int length) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateBlob(int columnIndex,java.sql.Blob x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateBlob(String columnName,java.sql.Blob x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateBoolean(int columnIndex,boolean x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateBoolean(String columnName,boolean x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateByte(int columnIndex,byte x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateByte(String columnName,byte x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateBytes(int columnIndex,byte[] x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateBytes(String columnName,byte[] x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateCharacterStream(int columnIndex,java.io.Reader x,int length) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateCharacterStream(String columnName,java.io.Reader reader, int length) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateClob(int columnIndex,java.sql.Clob x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateClob(String columnName,java.sql.Clob x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateDate(int columnIndex,java.sql.Date x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateDate(String columnName,java.sql.Date x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateDouble(int columnIndex,double x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateDouble(String columnName,double x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateFloat(int columnIndex,float x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateFloat(String columnName,float x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateInt(int columnIndex,int x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateInt(String columnName,int x) throws java.sql.SQLException 
{ throw new java.sql.SQLException(); } public void updateLong(int columnIndex,long x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateLong(String columnName,long x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateNull(int columnIndex) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateNull(String columnName) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateObject(int columnIndex,Object x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateObject(int columnIndex,Object x,int scale) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateObject(String columnName,Object x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateObject(String columnName,Object x,int scale) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateRef(int columnIndex,java.sql.Ref x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateRef(String columnName,java.sql.Ref x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateRow() throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateShort(int columnIndex,short x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateShort(String columnName,short x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateString(int columnIndex,String x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateString(String columnName,String x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateTime(int columnIndex,java.sql.Time x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateTime(String columnName,java.sql.Time x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateTimestamp(int columnIndex,java.sql.Timestamp x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void updateTimestamp(String columnName,java.sql.Timestamp x) throws java.sql.SQLException { throw new java.sql.SQLException(); } public void deleteRow() throws java.sql.SQLException { throw new java.sql.SQLException(); } public void moveToInsertRow() throws java.sql.SQLException { throw new java.sql.SQLException(); } public void moveToCurrentRow() throws java.sql.SQLException { throw new java.sql.SQLException(); } public void cancelRowUpdates() throws java.sql.SQLException { throw new java.sql.SQLException(); } public void insertRow() throws java.sql.SQLException { throw new java.sql.SQLException(); } public void refreshRow() throws java.sql.SQLException { throw new java.sql.SQLException(); } public int getRow() { return currentRow; } public boolean first() { return false; } public boolean previous() { return false; } public boolean isFirst() { return false; } private boolean afterLast=false; public boolean last() { try{ while(next()); } catch(java.sql.SQLException sqle){} afterLast=true; return true; } public boolean isLast() { return false; } public void beforeFirst() throws java.sql.SQLException { if(relation==null) throw new java.sql.SQLException(); if ((conditionIndex<0)&&(conditionValue!=null)) { iter=relation.index.keySet().iterator(); } else { iter=relation.index.values().iterator(); } currentRow=0; } public boolean 
isBeforeFirst() { return (currentRow==0); } public void afterLast(){ last(); } public boolean isAfterLast(){return afterLast;} public boolean absolute(int i) { return true; } public boolean relative(int i) { return false; } public boolean rowDeleted() { return false; } public boolean rowInserted() { return false; } public boolean rowUpdated() { return false; } public int getConcurrency() { return 0; } public int getType() { return 0; } public void setFetchSize(int i) throws java.sql.SQLException { statement.setFetchSize(i); } public int getFetchSize() throws java.sql.SQLException { return statement.getFetchSize(); } public void setFetchDirection(int i) throws java.sql.SQLException { statement.setFetchDirection(i); } public int getFetchDirection() throws java.sql.SQLException { return statement.getFetchDirection(); } public int getResultSetConcurrency() throws java.sql.SQLException { return statement.getResultSetConcurrency(); } public int getResultSetType() throws java.sql.SQLException { return statement.getResultSetType(); } public int getHoldability() throws SQLException { return 0; } public Reader getNCharacterStream(int arg0) throws SQLException { return null; } public Reader getNCharacterStream(String arg0) throws SQLException { return null; } public NClob getNClob(int arg0) throws SQLException { return null; } public NClob getNClob(String arg0) throws SQLException { return null; } public String getNString(int arg0) throws SQLException { return null; } public String getNString(String arg0) throws SQLException { return null; } //public Object getObject(int arg0, Map arg1) throws SQLException { return getString(arg0); } public Object getObject(int arg0, Map<String, Class<?>> arg1) throws SQLException { return getString(arg0); } public Object getObject(String arg0, Map<String, Class<?>> arg1) throws SQLException { return getObject(findColumn(arg0),arg1); } //public Object getObject(String arg0, Map arg1) throws SQLException { return getObject(findColumn(arg0),arg1); } public RowId getRowId(int arg0) throws SQLException { return null; } public RowId getRowId(String arg0) throws SQLException { return null; } public SQLXML getSQLXML(int arg0) throws SQLException { return null; } public SQLXML getSQLXML(String arg0) throws SQLException { return null;} public boolean isClosed() throws SQLException { return false; } public void updateAsciiStream(int arg0, InputStream arg1) throws SQLException {} public void updateAsciiStream(String arg0, InputStream arg1) throws SQLException {} public void updateAsciiStream(int arg0, InputStream arg1, long arg2) throws SQLException {} public void updateAsciiStream(String arg0, InputStream arg1, long arg2) throws SQLException {} public void updateBinaryStream(int arg0, InputStream arg1) throws SQLException {} public void updateBinaryStream(String arg0, InputStream arg1) throws SQLException {} public void updateBinaryStream(int arg0, InputStream arg1, long arg2) throws SQLException {} public void updateBinaryStream(String arg0, InputStream arg1, long arg2) throws SQLException {} public void updateBlob(int arg0, InputStream arg1) throws SQLException {} public void updateBlob(String arg0, InputStream arg1) throws SQLException {} public void updateBlob(int arg0, InputStream arg1, long arg2) throws SQLException {} public void updateBlob(String arg0, InputStream arg1, long arg2) throws SQLException {} public void updateCharacterStream(int arg0, Reader arg1) throws SQLException {} public void updateCharacterStream(String arg0, Reader arg1) throws SQLException {} 
public void updateCharacterStream(int arg0, Reader arg1, long arg2) throws SQLException {} public void updateCharacterStream(String arg0, Reader arg1, long arg2) throws SQLException {} public void updateClob(int arg0, Reader arg1) throws SQLException {} public void updateClob(String arg0, Reader arg1) throws SQLException {} public void updateClob(int arg0, Reader arg1, long arg2) throws SQLException {} public void updateClob(String arg0, Reader arg1, long arg2) throws SQLException {} public void updateNCharacterStream(int arg0, Reader arg1) throws SQLException {} public void updateNCharacterStream(String arg0, Reader arg1) throws SQLException {} public void updateNCharacterStream(int arg0, Reader arg1, long arg2) throws SQLException {} public void updateNCharacterStream(String arg0, Reader arg1, long arg2) throws SQLException {} public void updateNClob(int arg0, NClob arg1) throws SQLException {} public void updateNClob(String arg0, NClob arg1) throws SQLException {} public void updateNClob(int arg0, Reader arg1) throws SQLException {} public void updateNClob(String arg0, Reader arg1) throws SQLException {} public void updateNClob(int arg0, Reader arg1, long arg2) throws SQLException {} public void updateNClob(String arg0, Reader arg1, long arg2)throws SQLException {} public void updateNString(int arg0, String arg1) throws SQLException {} public void updateNString(String arg0, String arg1) throws SQLException {} public void updateRowId(int arg0, RowId arg1) throws SQLException {} public void updateRowId(String arg0, RowId arg1) throws SQLException {} public void updateSQLXML(int arg0, SQLXML arg1) throws SQLException {} public void updateSQLXML(String arg0, SQLXML arg1) throws SQLException {} public boolean isWrapperFor(Class<?> iface) throws SQLException {return false;} public <T> T unwrap(Class<T> iface) throws SQLException {return null;} }<|fim▁end|>
if((!isNumber(s1))||(!isNumber(s2)))
    return 0;
if(isDouble(s1)||(isDouble(s2))) {
    double d1=isDouble(s1)?s_double(s1):Long.valueOf(s_long(s1)).doubleValue();
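# A minimal Python sketch of the comparison rule this completion implements:
# compare two numeric strings as doubles when either looks fractional,
# otherwise as longs. isNumber/isDouble/s_double/s_long live in the unshown
# part of the Java class, so this reading of them is an assumption.
def compare_numeric_strings(s1, s2):
    def looks_double(s):
        return '.' in s or 'e' in s.lower()
    try:
        if looks_double(s1) or looks_double(s2):
            a, b = float(s1), float(s2)
        else:
            a, b = int(s1), int(s2)
    except ValueError:
        return 0  # mirrors the early "return 0" for non-numeric input
    return (a > b) - (a < b)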
<|file_name|>directives.py<|end_file_name|><|fim▁begin|># Define a new directive `code-block` (aliased as `sourcecode`) that uses the
# `pygments` source highlighter to render code in color.
#
# Incorporates code from the `Pygments`_ documentation for `Using Pygments in
# ReST documents`_ and `Octopress`_.
#
# .. _Pygments: http://pygments.org/
# .. _Using Pygments in ReST documents: http://pygments.org/docs/rstdirective/
# .. _Octopress: http://octopress.org/

import re
import os
import md5
import __main__

# Absolute path to pygments cache dir
PYGMENTS_CACHE_DIR = os.path.abspath(os.path.join(os.path.dirname(__main__.__file__), '../../.pygments-cache'))

# Ensure cache dir exists
if not os.path.exists(PYGMENTS_CACHE_DIR):
    os.makedirs(PYGMENTS_CACHE_DIR)

from pygments.formatters import HtmlFormatter

from docutils import nodes
from docutils.parsers.rst import directives, Directive

from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer


class Pygments(Directive):
    """ Source code syntax highlighting.
    """
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    string_opts = ['title', 'url', 'caption']
    option_spec = dict([(key, directives.unchanged) for key in string_opts])
    has_content = True

    def run(self):
        self.assert_has_content()
        try:
            lexer_name = self.arguments[0]
            lexer = get_lexer_by_name(lexer_name)
        except ValueError:
            # no lexer found - use the text one instead of an exception
            lexer_name = 'text'
            lexer = TextLexer()
        formatter = HtmlFormatter()

        # Construct cache filename
        cache_file = None
        content_text = u'\n'.join(self.content)
        cache_file_name = '%s-%s.html' % (lexer_name, md5.new(content_text).hexdigest())
        cached_path = os.path.join(PYGMENTS_CACHE_DIR, cache_file_name)

        # Look for cached version, otherwise parse
        if os.path.exists(cached_path):
            cache_file = open(cached_path, 'r')
            parsed = cache_file.read()
        else:
            parsed = highlight(content_text, lexer, formatter)

        # Strip pre tag and everything outside it
        pres = re.compile("<pre>(.+)</pre>", re.S)
        stripped = pres.search(parsed).group(1)

        # Create tabular code with line numbers
        table = '<div class="highlight"><table><tr><td class="gutter"><pre class="line-numbers">'
        lined = ''
        for idx, line in enumerate(stripped.splitlines(True)):
            table += '<span class="line-number">%d</span>\n' % (idx + 1)
            lined += '<span class="line">%s</span>' % line
        table += '</pre></td><td class="code"><pre><code class="%s">%s</code></pre></td></tr></table></div>' % (lexer_name, lined)

        # Add wrapper with optional caption and link
        code = '<figure class="code">'
        if self.options:
            caption = ('<span>%s</span>' % self.options['caption']) if 'caption' in self.options else ''<|fim▁hole|>
                code += '<figcaption>%s %s</figcaption>' % (caption, link)
        code += '%s</figure>' % table

        # Write cache
        if cache_file is None:
            cache_file = open(cached_path, 'w')
            cache_file.write(parsed)
            cache_file.close()

        return [nodes.raw('', code, format='html')]

directives.register_directive('code-block', Pygments)
directives.register_directive('sourcecode', Pygments)<|fim▁end|>
title = self.options['title'] if 'title' in self.options else 'link'
link = ('<a href="%s">%s</a>' % (self.options['url'], title)) if 'url' in self.options else ''
if caption or link:
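# A self-contained sketch of the highlight-and-cache core of directives.py
# above, with the line-number table and <figure> wrapping omitted. The cache
# key (lexer name plus an MD5 of the content) follows the file; hashlib stands
# in for the legacy md5 module, and CACHE_DIR is an assumed location.
import hashlib
import os

from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import TextLexer, get_lexer_by_name

CACHE_DIR = '.pygments-cache'

def cached_highlight(source, lexer_name):
    try:
        lexer = get_lexer_by_name(lexer_name)
    except ValueError:  # pygments.util.ClassNotFound subclasses ValueError
        lexer_name, lexer = 'text', TextLexer()
    digest = hashlib.md5(source.encode('utf-8')).hexdigest()
    path = os.path.join(CACHE_DIR, '%s-%s.html' % (lexer_name, digest))
    if os.path.exists(path):
        with open(path) as fh:
            return fh.read()
    parsed = highlight(source, lexer, HtmlFormatter())
    os.makedirs(CACHE_DIR, exist_ok=True)
    with open(path, 'w') as fh:
        fh.write(parsed)
    return parsed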
<|file_name|>apps.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from django.apps import AppConfig


class SalesforceDb(AppConfig):
    name = 'salesforce'
    label = 'salesforce_db'<|fim▁end|>
"""This file is useful only if 'salesforce' is a duplicit name in Django registry then put a string 'salesforce.apps.SalesforceDb' instead of simple 'salesforce' """
<|file_name|>models.py<|end_file_name|><|fim▁begin|>import datetime
import os

from sqlalchemy import create_engine
from sqlalchemy import MetaData, Table, Column, DateTime, Float, Integer, ForeignKey
from sqlalchemy import between, func
from sqlalchemy.sql import select
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy import Column, String, DateTime, Float, Integer
from werkzeug.security import generate_password_hash, check_password_hash

from . import db, app


class Meter(db.Model):
    """ A list of meters """
    __tablename__ = "meter"
    meter_id = Column(Integer, primary_key=True, autoincrement=True)
    user_id = Column(Integer, ForeignKey("user.user_id"))
    sharing = Column(String(7))  # Public / Private
    api_key = Column(String(36))
    meter_name = Column(String(20))


def delete_meter_data(meter_id):
    """ Delete meter and all data """
    Meter.query.filter(Meter.meter_id == meter_id).delete()
    db.session.commit()
    db_loc = f"data/meter_{meter_id}.db"
    if os.path.isfile(db_loc):
        os.remove(db_loc)


def get_meter_name(meter_id):
    """ Return the name of the given meter """
    meter = Meter.query.filter(Meter.meter_id == meter_id).first()
    return meter.meter_name


def get_meter_api_key(meter_id):
    """ Return the API key for the meter """
    meter = Meter.query.filter(Meter.meter_id == meter_id).first()
    return meter.api_key


def get_user_meters(user_id):
    """ Return a list of meters that the user manages """
    meters = Meter.query.filter(Meter.user_id == user_id)
    for meter in meters:
        user_name = User.query.filter_by(user_id=meter.user_id).first().username
        yield (meter.meter_id, meter.meter_name, user_name)


def get_public_meters():
    """ Return a list of publicly viewable meters """
    meters = Meter.query.filter(Meter.sharing == "public")<|fim▁hole|>
        yield (meter.meter_id, meter.meter_name, user_name)


def visible_meters(user_id):
    """ Return a list of meters that the user can view """
    if user_id:
        meters = Meter.query.filter(
            (Meter.user_id == user_id) | (Meter.sharing == "public")
        )
    else:
        meters = Meter.query.filter(Meter.sharing == "public")
    for meter in meters:
        user_name = User.query.filter_by(user_id=meter.user_id).first().username
        yield (meter.meter_id, meter.meter_name, user_name)


class User(db.Model):
    """ A user account """
    __tablename__ = "user"
    user_id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), index=True, unique=True)
    email = db.Column(db.String(120), index=True, unique=True)
    password_hash = db.Column(db.String(128))
    apikey = Column(String(128))

    def __repr__(self):
        return "<User {}>".format(self.username)

    def set_password(self, password):
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        return check_password_hash(self.password_hash, password)

    def is_active(self):
        """True, as all users are active."""
        return True

    def get_id(self):
        """Return the user id to satisfy Flask-Login's requirements."""
        return self.user_id

    def is_authenticated(self):
        """Return True if the user is authenticated."""
        return True

    def is_anonymous(self):
        """False, as anonymous users aren't supported."""
        return False<|fim▁end|>
for meter in meters:
    user_name = User.query.filter_by(user_id=meter.user_id).first().username
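# Usage sketch for the generator functions in models.py above: each yields
# (meter_id, meter_name, owner_username) tuples. Note the per-meter User
# lookup is an N+1 query pattern; joining Meter to User would avoid it.
def list_public_meters():
    return [
        {'id': mid, 'name': name, 'owner': owner}
        for mid, name, owner in get_public_meters()
    ]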
<|file_name|>SecurityConfig.java<|end_file_name|><|fim▁begin|>package br.eti.qisolucoes.contactcloud.config;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;
import org.springframework.security.config.annotation.authentication.builders.AuthenticationManagerBuilder;
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.config.annotation.web.configuration.EnableWebSecurity;
import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;
import org.springframework.security.core.userdetails.UserDetailsService;
import org.springframework.security.web.util.matcher.AntPathRequestMatcher;

@Configuration
@EnableWebSecurity
public class SecurityConfig extends WebSecurityConfigurerAdapter {

    @Autowired
    private UserDetailsService userDetailsService;

    @Override
    protected void configure(HttpSecurity http) throws Exception {
        http.authorizeRequests().antMatchers("/").authenticated()
            .antMatchers("/theme/**", "/plugins/**", "/page/**", "/", "/usuario/form", "/usuario/salvar").permitAll()
            .anyRequest().authenticated()
            .and()<|fim▁hole|>
            .formLogin()
            .loginProcessingUrl("/login").loginPage("/login").permitAll().defaultSuccessUrl("/agenda/abrir", true)
            .and()
            .logout().logoutRequestMatcher(new AntPathRequestMatcher("/logout")).logoutSuccessUrl("/");
    }

    @Override
    protected void configure(AuthenticationManagerBuilder auth) throws Exception {
        super.configure(auth);
        auth.userDetailsService(userDetailsService);
    }
}<|fim▁end|>
<|file_name|>sensor.py<|end_file_name|><|fim▁begin|>"""Bitcoin information service that uses blockchain.info.""" from datetime import timedelta import logging from blockchain import exchangerates, statistics import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ATTR_ATTRIBUTION, CONF_CURRENCY, CONF_DISPLAY_OPTIONS import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity _LOGGER = logging.getLogger(__name__) ATTRIBUTION = "Data provided by blockchain.info" DEFAULT_CURRENCY = "USD" ICON = "mdi:currency-btc" SCAN_INTERVAL = timedelta(minutes=5) OPTION_TYPES = { "exchangerate": ["Exchange rate (1 BTC)", None], "trade_volume_btc": ["Trade volume", "BTC"], "miners_revenue_usd": ["Miners revenue", "USD"], "btc_mined": ["Mined", "BTC"], "trade_volume_usd": ["Trade volume", "USD"], "difficulty": ["Difficulty", None], "minutes_between_blocks": ["Time between Blocks", "min"], "number_of_transactions": ["No. of Transactions", None], "hash_rate": ["Hash rate", "PH/s"], "timestamp": ["Timestamp", None], "mined_blocks": ["Mined Blocks", None], "blocks_size": ["Block size", None], "total_fees_btc": ["Total fees", "BTC"], "total_btc_sent": ["Total sent", "BTC"], "estimated_btc_sent": ["Estimated sent", "BTC"], "total_btc": ["Total", "BTC"], "total_blocks": ["Total Blocks", None], "next_retarget": ["Next retarget", None], "estimated_transaction_volume_usd": ["Est. Transaction volume", "USD"], "miners_revenue_btc": ["Miners revenue", "BTC"], "market_price_usd": ["Market price", "USD"], } PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_DISPLAY_OPTIONS, default=[]): vol.All( cv.ensure_list, [vol.In(OPTION_TYPES)] ), vol.Optional(CONF_CURRENCY, default=DEFAULT_CURRENCY): cv.string, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Bitcoin sensors.""" currency = config.get(CONF_CURRENCY) if currency not in exchangerates.get_ticker(): _LOGGER.warning("Currency %s is not available. 
Using USD", currency) currency = DEFAULT_CURRENCY data = BitcoinData() dev = [] for variable in config[CONF_DISPLAY_OPTIONS]: dev.append(BitcoinSensor(data, variable, currency)) add_entities(dev, True) class BitcoinSensor(Entity): """Representation of a Bitcoin sensor.""" def __init__(self, data, option_type, currency): """Initialize the sensor.""" self.data = data self._name = OPTION_TYPES[option_type][0] self._unit_of_measurement = OPTION_TYPES[option_type][1] self._currency = currency self.type = option_type self._state = None @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self): """Return the unit the value is expressed in.""" return self._unit_of_measurement @property def icon(self): """Return the icon to use in the frontend, if any.""" return ICON @property def device_state_attributes(self): """Return the state attributes of the sensor.""" return {ATTR_ATTRIBUTION: ATTRIBUTION} def update(self): """Get the latest data and updates the states.""" self.data.update() stats = self.data.stats ticker = self.data.ticker if self.type == "exchangerate": self._state = ticker[self._currency].p15min self._unit_of_measurement = self._currency elif self.type == "trade_volume_btc": self._state = "{0:.1f}".format(stats.trade_volume_btc) elif self.type == "miners_revenue_usd": self._state = "{0:.0f}".format(stats.miners_revenue_usd) elif self.type == "btc_mined": self._state = "{}".format(stats.btc_mined * 0.00000001) elif self.type == "trade_volume_usd": self._state = "{0:.1f}".format(stats.trade_volume_usd)<|fim▁hole|> elif self.type == "difficulty": self._state = "{0:.0f}".format(stats.difficulty) elif self.type == "minutes_between_blocks": self._state = "{0:.2f}".format(stats.minutes_between_blocks) elif self.type == "number_of_transactions": self._state = "{}".format(stats.number_of_transactions) elif self.type == "hash_rate": self._state = "{0:.1f}".format(stats.hash_rate * 0.000001) elif self.type == "timestamp": self._state = stats.timestamp elif self.type == "mined_blocks": self._state = "{}".format(stats.mined_blocks) elif self.type == "blocks_size": self._state = "{0:.1f}".format(stats.blocks_size) elif self.type == "total_fees_btc": self._state = "{0:.2f}".format(stats.total_fees_btc * 0.00000001) elif self.type == "total_btc_sent": self._state = "{0:.2f}".format(stats.total_btc_sent * 0.00000001) elif self.type == "estimated_btc_sent": self._state = "{0:.2f}".format(stats.estimated_btc_sent * 0.00000001) elif self.type == "total_btc": self._state = "{0:.2f}".format(stats.total_btc * 0.00000001) elif self.type == "total_blocks": self._state = "{0:.0f}".format(stats.total_blocks) elif self.type == "next_retarget": self._state = "{0:.2f}".format(stats.next_retarget) elif self.type == "estimated_transaction_volume_usd": self._state = "{0:.2f}".format(stats.estimated_transaction_volume_usd) elif self.type == "miners_revenue_btc": self._state = "{0:.1f}".format(stats.miners_revenue_btc * 0.00000001) elif self.type == "market_price_usd": self._state = "{0:.2f}".format(stats.market_price_usd) class BitcoinData: """Get the latest data and update the states.""" def __init__(self): """Initialize the data object.""" self.stats = None self.ticker = None def update(self): """Get the latest data from blockchain.info.""" self.stats = statistics.get() self.ticker = exchangerates.get_ticker()<|fim▁end|>
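# The update() method in sensor.py above is one long if/elif chain; the same
# dispatch can be written as a table of (attribute, scale, format) tuples.
# A condensed sketch covering only a few of the option types from the file:
FORMATTERS = {
    'trade_volume_btc': ('trade_volume_btc', 1, '{0:.1f}'),
    'btc_mined': ('btc_mined', 0.00000001, '{}'),
    'hash_rate': ('hash_rate', 0.000001, '{0:.1f}'),
    'market_price_usd': ('market_price_usd', 1, '{0:.2f}'),
}

def format_stat(stats, option_type):
    attr, scale, fmt = FORMATTERS[option_type]
    return fmt.format(getattr(stats, attr) * scale)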
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>//! Compiler utilities.
//!
//! This module contains a number of utilities used throughout the compiler, such as unique symbol
//! and ID generators and a module for measuring compile-time performance of various aspects of the
//! compiler.

use fnv;

use crate::ast::ExprKind::*;
use crate::ast::*;<|fim▁hole|>
use std::iter;

pub mod colors;
pub mod dump;
pub mod stats;

/// Utility struct that can track and generate unique IDs and symbols for use in an expression.
/// Each SymbolGenerator tracks the maximum ID used for every symbol name, and can be used to
/// create new symbols with the same name but a unique ID.
#[derive(Debug, Clone)]
pub struct SymbolGenerator {
    id_map: fnv::FnvHashMap<String, i32>,
}

impl SymbolGenerator {
    /// Initialize a SymbolGenerator with no existing symbols.
    pub fn new() -> SymbolGenerator {
        SymbolGenerator {
            id_map: fnv::FnvHashMap::default(),
        }
    }

    /// Initialize a SymbolGenerator from all the symbols defined in an expression.
    pub fn from_expression(expr: &Expr) -> SymbolGenerator {
        let mut id_map: fnv::FnvHashMap<String, i32> = fnv::FnvHashMap::default();

        let update_id = |id_map: &mut fnv::FnvHashMap<String, i32>, symbol: &Symbol| {
            let id = id_map.entry(symbol.name().clone()).or_insert(0);
            *id = max(*id, symbol.id());
        };

        expr.traverse(&mut |e| match e.kind {
            Let { ref name, .. } => update_id(&mut id_map, name),
            Ident(ref sym) => update_id(&mut id_map, sym),
            Lambda { ref params, .. } => {
                for p in params.iter() {
                    update_id(&mut id_map, &p.name);
                }
            }
            _ => {}
        });

        SymbolGenerator { id_map }
    }

    pub fn new_symbol(&mut self, name: &str) -> Symbol {
        let id = self.id_map.entry(name.to_owned()).or_insert(-1);
        *id += 1;
        Symbol::new(name, *id)
    }
}

pub fn join<T: iter::Iterator<Item = String>>(
    start: &str,
    sep: &str,
    end: &str,
    strings: T,
) -> String {
    let mut res = String::new();
    res.push_str(start);
    for (i, s) in strings.enumerate() {
        if i > 0 {
            res.push_str(sep);
        }
        res.push_str(&s);
    }
    res.push_str(end);
    res
}<|fim▁end|>
use std::cmp::max;
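# Language-independent sketch of the SymbolGenerator contract above: track
# the highest ID seen per name and mint fresh (name, id) pairs. Python is
# used for brevity; the Rust version keeps the map in an FnvHashMap.
class PySymbolGenerator:
    def __init__(self):
        self.id_map = {}

    def new_symbol(self, name):
        # start from -1 so the first symbol for a name gets ID 0,
        # matching new_symbol() in the Rust code
        self.id_map[name] = self.id_map.get(name, -1) + 1
        return (name, self.id_map[name])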
<|file_name|>test_convert.py<|end_file_name|><|fim▁begin|>from tempfile import TemporaryFile

import pytest

from imgpy import Img


@pytest.mark.parametrize('image', ({
    'sub': 'anima/bordered.gif',
    'convert': 'L',
    'mode': 'P'
}, {
    'sub': 'anima/clear.gif',
    'convert': 'L',
    'mode': 'P'
}, {
    'sub': 'fixed/bordered.jpg',
    'convert': 'L',
    'mode': 'L'<|fim▁hole|>
    'mode': 'L'
}, ))
def test_convert(path, image):
    with Img(fp=path(image['sub'])) as src, TemporaryFile() as tf:
        src.convert(image['convert'])
        src.save(fp=tf)

        with Img(fp=tf) as dest:
            assert (dest.width, dest.height, dest.mode, dest.frame_count) == (
                src.width, src.height, image['mode'], src.frame_count)<|fim▁end|>
}, {
    'sub': 'fixed/clear.jpg',
    'convert': 'L',
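# The test above takes a `path` fixture that is not defined in this file; a
# plausible conftest.py definition (directory layout assumed) would be:
import os

import pytest


@pytest.fixture
def path():
    base = os.path.join(os.path.dirname(__file__), 'data')
    return lambda sub: os.path.join(base, sub)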
<|file_name|>tbb.rs<|end_file_name|><|fim▁begin|>use crate::core::bits::Bits;
use crate::core::instruction::{Instruction, Reg2RnRmParams};
use crate::core::register::Reg;<|fim▁hole|>
pub fn decode_TBB_t1(opcode: u32) -> Instruction {
    let rn = opcode.get_bits(16..20);
    let rm = opcode.get_bits(0..4);

    Instruction::TBB {
        params: Reg2RnRmParams {
            rn: Reg::from(rn),
            rm: Reg::from(rm),
        },
    }
}<|fim▁end|>
#[allow(non_snake_case)]
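# What get_bits(16..20) in tbb.rs above does, spelled out: extract bits
# [16, 20) of the 32-bit opcode by mask-and-shift. Equivalent arithmetic:
def get_bits(opcode, lo, hi):
    return (opcode >> lo) & ((1 << (hi - lo)) - 1)

assert get_bits(0xE8D0F000, 16, 20) == 0  # Rn field of a base TBB encoding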
<|file_name|>test_core.py<|end_file_name|><|fim▁begin|>"""
Tests for core and utility classes
"""
import unittest

from pandas_accounting import Company, Subsidiary<|fim▁hole|>

class TestCompany(unittest.TestCase):

    def setUp(self):
        self.C = Company(shares=100)

    def test_company_shares(self):
        self.assertEqual(self.C.shares, 100)


class TestSubsidiary(unittest.TestCase):

    def setUp(self):
        self.S = Subsidiary()

    def test_sub(self):
        self.assertEqual(0, 0)<|fim▁end|>
<|file_name|>chromeManifestModify.js<|end_file_name|><|fim▁begin|>import fs from 'fs' import path from 'path' const Jimp = require('jimp') const hjson = require('hjson') const electronImageResize = require('./electronImageResize') const {getPath1,getPath2} = require('./chromeExtensionUtil') const contentScriptName = '___contentScriptModify_.js' const backgroundScriptName = '___backgroundScriptModify_.js' const polyfillName = 'browser-polyfill.min.js' const webExtModifyBg = 'webextensionModifyBg.js' const webExtModifyCs = 'webextensionModifyCs.js' const webExtStyleName = 'webextension.css' const backgroundHtmlName = '___backgroundModify_.html' let backgroundHtmlStr = `<!DOCTYPE html> <head><meta charset="UTF-8"></head> <body> <script src="${backgroundScriptName}"></script> __REPLACE__ </body></html>` function findJsTags(obj,callback){ if(obj.js){ obj.js = callback(obj.js) } if(Array.isArray(obj)) { for(let ele of obj){ findJsTags(ele,callback) } } else if(obj instanceof Object){ for(let [key,ele] of Object.entries(obj)){ if(key != 'js') findJsTags(ele,callback) } } } function copyModifyFile(to,flagContent,flagBackground,isWebExt){ if(flagContent){ const cont = fs.readFileSync(path.join(__dirname,'../src/extension/contentScriptModify.js')).toString() const contPath = path.join(to,contentScriptName) if(fs.existsSync(contPath)) fs.unlinkSync(contPath) fs.writeFileSync(contPath,cont) } if(flagBackground){ const bg = fs.readFileSync(path.join(__dirname,'../src/extension/backgroundScriptModify.js')).toString() const bgPath = path.join(to,backgroundScriptName) if(fs.existsSync(bgPath)) fs.unlinkSync(bgPath) fs.writeFileSync(bgPath,bg) } if(isWebExt){ for(let file of [polyfillName,webExtModifyBg,webExtModifyCs,webExtStyleName]){ const poli = fs.readFileSync(path.join(__dirname,`../resource/${file}`)).toString() const poliPath = path.join(to,file) if(fs.existsSync(poliPath)) fs.unlinkSync(poliPath) fs.writeFileSync(poliPath,poli) } } } let cache = new Set() function htmlModify(verPath,fname,isWebExt){ const dirName = path.dirname(fname) const backStr = dirName == '.' ? dirName : dirName.split(/[\/\\]/).map(x=>'..').join('/') console.log(verPath,fname,dirName,backStr) const fullPath = path.join(verPath,dirName,path.basename(fname).split("?")[0]) if(cache.has(fullPath) || !fs.existsSync(fullPath)) return cache.add(fullPath) const str = fs.readFileSync(fullPath).toString() if(str.includes(backgroundScriptName)) return fs.unlinkSync(fullPath) let writeStr = str.replace(/< *(head)([^>]*)>/i,`<$1$2>\n ${isWebExt ? `<script src="${backStr}/${polyfillName}"></script>\n<script src="${backStr}/${webExtModifyBg}"></script>\n` : ''}<script src="${backStr}/${backgroundScriptName}"></script>`) if(!writeStr.includes(backgroundScriptName)){ writeStr = str.replace(/< *(body)([^>]*)>/i,`<$1$2>\n ${isWebExt ? `<script src="${backStr}/${polyfillName}"></script>\n<script src="${backStr}/${webExtModifyBg}"></script>\n` : ''}<script src="${backStr}/${backgroundScriptName}"></script>`) } if(!writeStr.includes(backgroundScriptName)){ writeStr = str.replace(/html>/i,`html>\n ${isWebExt ? `<script src="${backStr}/${polyfillName}"></script>\n<script src="${backStr}/${webExtModifyBg}"></script>\n<link rel="stylesheet" href="${backStr}/${webExtStyleName}">\n` : ''}\n<script src="${backStr}/${backgroundScriptName}"></script>`) } if(!writeStr.includes(backgroundScriptName)){ writeStr = `<!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> <title>Background</title> ${isWebExt ? 
`<script src="${backStr}/${polyfillName}"></script>\n<script src="${backStr}/${webExtModifyBg}"></script>\n` : ''} <script src="${backStr}/${backgroundScriptName}"></script> ${writeStr} </head> <body> </body> </html>` } fs.writeFileSync(fullPath,writeStr) } function removeBom(x){ return x.charCodeAt(0) === 0xFEFF ? x.slice(1) : x } export default async function modify(extensionId,verPath){ const isWebExt = !extensionId.match(/(^[a-z]+$)|(_chrome_$)/) cache = new Set() if(!verPath){ verPath = getPath2(extensionId) || getPath1(extensionId) //getPath1(extensionId) } const manifestPath = path.join(verPath, 'manifest.json') const exists = fs.existsSync(manifestPath) if (exists) { const manifestStr = removeBom(fs.readFileSync(manifestPath).toString()).replace('\\u003Call_urls>','<all_urls>') const infos = hjson.parse(manifestStr) if(!infos.key || infos.key.match(/[\-\.]/)){ infos.key = new Buffer(extensionId).toString('base64') } if(infos.permissions && infos.permissions.includes('activeTab') && (!infos.permissions.includes('http://*/*') || !infos.permissions.includes('https://*/*'))){ infos.permissions = [...new Set([...infos.permissions,'http://*/*','https://*/*'])] } if(infos.optional_permissions){ infos.permissions = [...new Set([...(infos.permissions || []),...infos.optional_permissions])] } if(!infos.content_security_policy){ infos.content_security_policy = "script-src 'self' 'unsafe-eval'; object-src 'self'" } if(isWebExt && infos.permissions){ infos.permissions = infos.permissions.filter(x=>x!=='clipboardWrite' && x!=='clipboardRead') const ind = infos.permissions.findIndex(x=>x=='menus') if(ind != -1) infos.permissions[ind] = 'contextMenus' } let flagContent,flagBackground if(infos.content_scripts){ findJsTags(infos.content_scripts,js=>{ if(!Array.isArray(js)) js = [js] if(isWebExt && !js.includes(polyfillName)){ js.unshift(webExtModifyCs) js.unshift(polyfillName) } if(!js.includes(contentScriptName)) js.unshift(contentScriptName) return js }) flagContent = true } let open const imageResize = new electronImageResize() try{ if(infos.background){ if(infos.background.persistent === false && !['jpkfjicglakibpenojifdiepckckakgk','occjjkgifpmdgodlplnacmkejpdionan'].includes(extensionId)){ infos.background.persistent = true } if(infos.background.page){ htmlModify(verPath,infos.background.page,isWebExt) } else if(infos.background.scripts){ if(!Array.isArray(infos.background.scripts)) infos.background.scripts = [infos.background.scripts] if(isWebExt) backgroundHtmlStr = backgroundHtmlStr.replace('<body>',`<body>\n<script src="${polyfillName}"></script>\n<script src="${webExtModifyBg}"></script>`) const content = backgroundHtmlStr.replace('__REPLACE__',infos.background.scripts.map(src=>`<script src="${src}"></script>`).join("\n ")) fs.writeFileSync(path.join(verPath,backgroundHtmlName),content) infos.background.page = backgroundHtmlName delete infos.background.scripts } flagBackground = true } if(infos.options_page){ htmlModify(verPath,infos.options_page,isWebExt) flagBackground = true } if(infos.options_ui && infos.options_ui.page){ if(!infos.options_page) infos.options_page = infos.options_ui.page htmlModify(verPath,infos.options_ui.page,isWebExt)<|fim▁hole|> } if(infos.page_action && infos.page_action.default_popup){ htmlModify(verPath,infos.page_action.default_popup,isWebExt) flagBackground = true } if(infos.browser_action && infos.browser_action.default_popup){ htmlModify(verPath,infos.browser_action.default_popup,isWebExt) flagBackground = true } if(infos.web_accessible_resources){ for(let 
file of infos.web_accessible_resources){ if(file.match(/\.html?$/)){ htmlModify(verPath,file,isWebExt) flagBackground = true } } } if(infos.chrome_url_overrides){ for(let file of Object.values(infos.chrome_url_overrides)){ htmlModify(verPath,file,isWebExt) flagBackground = true } } if(infos.page_action){ if(!infos.browser_action){ infos.browser_action = infos.page_action if(infos.browser_action.show){ infos.browser_action.enable = infos.browser_action.show delete infos.browser_action.show } if(infos.browser_action.hide){ infos.browser_action.disable = infos.browser_action.hide delete infos.browser_action.hide } } delete infos.page_action } for(let file of require("glob").sync(`${verPath}/**/*.html`)){ console.log(222444,verPath,file.replace(`${verPath}/`,''),isWebExt) htmlModify(verPath,file.replace(`${verPath.replace(/\\/g,'/')}/`,''),isWebExt) } for(let file of require("glob").sync(`${verPath}/**/*.js`)){ let datas = fs.readFileSync(file).toString(),needWrite = false if (isWebExt && datas.includes('moz-extension')) { // console.log(file) datas = datas.replace(/moz\-extension/ig,'chrome-extension') needWrite = true } if(extensionId == 'mlomiejdfkolichcflejclcbmpeaniij' && datas.includes('about:blank')){ datas = datas.replace(/about:blank/ig,'chrome-extension://dckpbojndfoinamcdamhkjhnjnmjkfjd/blank.html') needWrite = true } if(needWrite){ fs.writeFileSync(file, datas) } } if(infos.commands){ for(let [k,v] of Object.entries(infos.commands)) { if(v.suggested_key){ for(let [k2,v2] of Object.entries(v.suggested_key)){ if(v2.match(/^F\d+$/)){ delete infos.commands[k] break } } } if (k == '_execute_browser_action' || k == '_execute_page_action') continue if (!v.description) v.description = "description" } } copyModifyFile(verPath,flagContent,flagBackground,isWebExt) fs.unlinkSync(manifestPath) fs.writeFileSync(manifestPath,JSON.stringify(infos, null, ' ')) console.log(33332001) for(let svg of require("glob").sync(`${verPath}/**/*.svg`)){ const out = svg.replace(/\.svg$/,".png") if(!fs.existsSync(out)){ if(!open){ imageResize.open({width: 16, height: 16}) open = true } console.log(`file://${svg}`) const url = path.join(path.parse(svg).dir,'svg.html') fs.writeFileSync(url,`<!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> <style type="text/css"> img,svg{ width: 100%; height: 100%; } </style> </head> <body> <img src="${svg}"/> </body> </html>`) const img = await imageResize.capture({url: `file://${url}`, width: 16, height: 16}) Jimp.read(img.toPNG(), function (err, image) { if(image.bitmap.width > image.bitmap.height){ image = image.resize(16,Jimp.AUTO,Jimp.RESIZE_BICUBIC) } else{ image = image.resize(Jimp.AUTO,16,Jimp.RESIZE_BICUBIC) } image.write(out) }) } } if(open) imageResize.close() }catch(e){ if(open) imageResize.close() console.log(33332002,e) } // if(isWebExt){ // for(let js of require("glob").sync(`${verPath}/**/*.js`)){ // const datas = fs.readFileSync(js).toString() // if(datas.match(/document.execCommand\( *(["'])copy\1 *\)/)){ // const result = datas.replace(/document.execCommand\( *(["'])copy\1 *\)/,`chrome.ipcRenderer.send('execCommand-copy')`) // fs.writeFileSync(js, result) // } // } // } } }<|fim▁end|>
flagBackground = true
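# The htmlModify step in chromeManifestModify.js above injects a background
# script tag by rewriting <head>/<body>. A stripped-down sketch of that
# injection idea (regexes simplified; the real code also inserts polyfill
# tags and falls back to wrapping the whole document):
import re

def inject_script(html, src):
    tag = '<script src="%s"></script>' % src
    out, n = re.subn(r'(<\s*head[^>]*>)', r'\1\n' + tag, html, count=1, flags=re.I)
    if n == 0:
        out, n = re.subn(r'(<\s*body[^>]*>)', r'\1\n' + tag, html, count=1, flags=re.I)
    return out if n else tag + '\n' + html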
<|file_name|>DataSourceSmartOS.py<|end_file_name|><|fim▁begin|># vi: ts=4 expandtab # # Copyright (C) 2013 Canonical Ltd. # # Author: Ben Howard <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # # Datasource for provisioning on SmartOS. This works on Joyent # and public/private Clouds using SmartOS. # # SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests # The meta-data is transmitted via key/value pairs made by # requests on the console. For example, to get the hostname, you # would send "GET hostname" on /dev/ttyS1. # For Linux Guests running in LX-Brand Zones on SmartOS hosts # a socket (/native/.zonecontrol/metadata.sock) is used instead # of a serial console. # # Certain behavior is defined by the DataDictionary # http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html # Comments with "@datadictionary" are snippets of the definition import base64 import binascii import json import os import random import re import socket from cloudinit import log as logging from cloudinit import serial from cloudinit import sources from cloudinit import util LOG = logging.getLogger(__name__) SMARTOS_ATTRIB_MAP = { # Cloud-init Key : (SmartOS Key, Strip line endings) 'instance-id': ('sdc:uuid', True), 'local-hostname': ('hostname', True), 'public-keys': ('root_authorized_keys', True), 'user-script': ('user-script', False), 'legacy-user-data': ('user-data', False), 'user-data': ('cloud-init:user-data', False), 'iptables_disable': ('iptables_disable', True), 'motd_sys_info': ('motd_sys_info', True), 'availability_zone': ('sdc:datacenter_name', True), 'vendor-data': ('sdc:vendor-data', False), 'operator-script': ('sdc:operator-script', False), 'hostname': ('sdc:hostname', True), 'dns_domain': ('sdc:dns_domain', True), } SMARTOS_ATTRIB_JSON = { # Cloud-init Key : (SmartOS Key known JSON) 'network-data': 'sdc:nics', 'dns_servers': 'sdc:resolvers', 'routes': 'sdc:routes', } SMARTOS_ENV_LX_BRAND = "lx-brand" SMARTOS_ENV_KVM = "kvm" DS_NAME = 'SmartOS' DS_CFG_PATH = ['datasource', DS_NAME] NO_BASE64_DECODE = [ 'iptables_disable', 'motd_sys_info', 'root_authorized_keys', 'sdc:datacenter_name', 'sdc:uuid' 'user-data', 'user-script', ] METADATA_SOCKFILE = '/native/.zonecontrol/metadata.sock' SERIAL_DEVICE = '/dev/ttyS1' SERIAL_TIMEOUT = 60 # BUILT-IN DATASOURCE CONFIGURATION # The following is the built-in configuration. 
If the values # are not set via the system configuration, then these default # will be used: # serial_device: which serial device to use for the meta-data # serial_timeout: how long to wait on the device # no_base64_decode: values which are not base64 encoded and # are fetched directly from SmartOS, not meta-data values # base64_keys: meta-data keys that are delivered in base64 # base64_all: with the exclusion of no_base64_decode values, # treat all meta-data as base64 encoded # disk_setup: describes how to partition the ephemeral drive # fs_setup: describes how to format the ephemeral drive # BUILTIN_DS_CONFIG = { 'serial_device': SERIAL_DEVICE, 'serial_timeout': SERIAL_TIMEOUT, 'metadata_sockfile': METADATA_SOCKFILE, 'no_base64_decode': NO_BASE64_DECODE, 'base64_keys': [], 'base64_all': False, 'disk_aliases': {'ephemeral0': '/dev/vdb'}, } BUILTIN_CLOUD_CONFIG = { 'disk_setup': { 'ephemeral0': {'table_type': 'mbr', 'layout': False, 'overwrite': False} }, 'fs_setup': [{'label': 'ephemeral0', 'filesystem': 'ext3', 'device': 'ephemeral0'}], } # builtin vendor-data is a boothook that writes a script into # /var/lib/cloud/scripts/per-boot. *That* script then handles # executing the 'operator-script' and 'user-script' files # that cloud-init writes into /var/lib/cloud/instance/data/ # if they exist. # # This is all very indirect, but its done like this so that at # some point in the future, perhaps cloud-init wouldn't do it at # all, but rather the vendor actually provide vendor-data that accomplished # their desires. (That is the point of vendor-data). # # cloud-init does cheat a bit, and write the operator-script and user-script # itself. It could have the vendor-script do that, but it seems better # to not require the image to contain a tool (mdata-get) to read those # keys when we have a perfectly good one inside cloud-init. BUILTIN_VENDOR_DATA = """\ #cloud-boothook #!/bin/sh fname="%(per_boot_d)s/01_smartos_vendor_data.sh" mkdir -p "${fname%%/*}" cat > "$fname" <<"END_SCRIPT" #!/bin/sh ## # This file is written as part of the default vendor data for SmartOS. # The SmartOS datasource writes the listed file from the listed metadata key # sdc:operator-script -> %(operator_script)s # user-script -> %(user_script)s # # You can view content with 'mdata-get <key>' # for script in "%(operator_script)s" "%(user_script)s"; do [ -x "$script" ] || continue echo "executing '$script'" 1>&2 "$script" done END_SCRIPT chmod +x "$fname" """ # @datadictionary: this is legacy path for placing files from metadata # per the SmartOS location. 
It is not preferable, but is done for # legacy reasons LEGACY_USER_D = "/var/db" class DataSourceSmartOS(sources.DataSource): _unset = "_unset" smartos_type = _unset md_client = _unset def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.ds_cfg = util.mergemanydict([ self.ds_cfg, util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}), BUILTIN_DS_CONFIG]) self.metadata = {} self.network_data = None self._network_config = None self.script_base_d = os.path.join(self.paths.get_cpath("scripts")) self._init() def __str__(self): root = sources.DataSource.__str__(self) return "%s [client=%s]" % (root, self.md_client) def _init(self): if self.smartos_type == self._unset: self.smartos_type = get_smartos_environ() if self.smartos_type is None: self.md_client = None if self.md_client == self._unset: self.md_client = jmc_client_factory( smartos_type=self.smartos_type, metadata_sockfile=self.ds_cfg['metadata_sockfile'], serial_device=self.ds_cfg['serial_device'], serial_timeout=self.ds_cfg['serial_timeout']) def _set_provisioned(self): '''Mark the instance provisioning state as successful. When run in a zone, the host OS will look for /var/svc/provisioning to be renamed as /var/svc/provision_success. This should be done after meta-data is successfully retrieved and from this point the host considers the provision of the zone to be a success and keeps the zone running. ''' LOG.debug('Instance provisioning state set as successful') svc_path = '/var/svc' if os.path.exists('/'.join([svc_path, 'provisioning'])): os.rename('/'.join([svc_path, 'provisioning']), '/'.join([svc_path, 'provision_success'])) def get_data(self): self._init() md = {} ud = "" if not self.smartos_type: LOG.debug("Not running on smartos") return False if not self.md_client.exists(): LOG.debug("No metadata device '%r' found for SmartOS datasource", self.md_client) return False for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items(): smartos_noun, strip = attribute md[ci_noun] = self.md_client.get(smartos_noun, strip=strip) for ci_noun, smartos_noun in SMARTOS_ATTRIB_JSON.items(): md[ci_noun] = self.md_client.get_json(smartos_noun) # @datadictionary: This key may contain a program that is written # to a file in the filesystem of the guest on each boot and then # executed. It may be of any format that would be considered # executable in the guest instance. # # We write 'user-script' and 'operator-script' into the # instance/data directory. The default vendor-data then handles # executing them later. data_d = os.path.join(self.paths.get_cpath(), 'instances', md['instance-id'], 'data') user_script = os.path.join(data_d, 'user-script') u_script_l = "%s/user-script" % LEGACY_USER_D write_boot_content(md.get('user-script'), content_f=user_script, link=u_script_l, shebang=True, mode=0o700) operator_script = os.path.join(data_d, 'operator-script') write_boot_content(md.get('operator-script'), content_f=operator_script, shebang=False, mode=0o700) # @datadictionary: This key has no defined format, but its value # is written to the file /var/db/mdata-user-data on each boot prior # to the phase that runs user-script. This file is not to be executed. # This allows a configuration file of some kind to be injected into # the machine to be consumed by the user-script when it runs. 
u_data = md.get('legacy-user-data') u_data_f = "%s/mdata-user-data" % LEGACY_USER_D write_boot_content(u_data, u_data_f) # Handle the cloud-init regular meta if not md['local-hostname']: md['local-hostname'] = md['instance-id'] ud = None<|fim▁hole|> ud = md['user-data'] if not md['vendor-data']: md['vendor-data'] = BUILTIN_VENDOR_DATA % { 'user_script': user_script, 'operator_script': operator_script, 'per_boot_d': os.path.join(self.paths.get_cpath("scripts"), 'per-boot'), } self.metadata = util.mergemanydict([md, self.metadata]) self.userdata_raw = ud self.vendordata_raw = md['vendor-data'] self.network_data = md['network-data'] self._set_provisioned() return True def device_name_to_device(self, name): return self.ds_cfg['disk_aliases'].get(name) def get_config_obj(self): if self.smartos_type == SMARTOS_ENV_KVM: return BUILTIN_CLOUD_CONFIG return {} def get_instance_id(self): return self.metadata['instance-id'] @property def network_config(self): if self._network_config is None: if self.network_data is not None: self._network_config = ( convert_smartos_network_data( network_data=self.network_data, dns_servers=self.metadata['dns_servers'], dns_domain=self.metadata['dns_domain'])) return self._network_config class JoyentMetadataFetchException(Exception): pass class JoyentMetadataClient(object): """ A client implementing v2 of the Joyent Metadata Protocol Specification. The full specification can be found at http://eng.joyent.com/mdata/protocol.html """ line_regex = re.compile( r'V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)' r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)' r'( (?P<payload>.+))?)') def __init__(self, smartos_type=None, fp=None): if smartos_type is None: smartos_type = get_smartos_environ() self.smartos_type = smartos_type self.fp = fp def _checksum(self, body): return '{0:08x}'.format( binascii.crc32(body.encode('utf-8')) & 0xffffffff) def _get_value_from_frame(self, expected_request_id, frame): frame_data = self.line_regex.match(frame).groupdict() if int(frame_data['length']) != len(frame_data['body']): raise JoyentMetadataFetchException( 'Incorrect frame length given ({0} != {1}).'.format( frame_data['length'], len(frame_data['body']))) expected_checksum = self._checksum(frame_data['body']) if frame_data['checksum'] != expected_checksum: raise JoyentMetadataFetchException( 'Invalid checksum (expected: {0}; got {1}).'.format( expected_checksum, frame_data['checksum'])) if frame_data['request_id'] != expected_request_id: raise JoyentMetadataFetchException( 'Request ID mismatch (expected: {0}; got {1}).'.format( expected_request_id, frame_data['request_id'])) if not frame_data.get('payload', None): LOG.debug('No value found.') return None value = util.b64d(frame_data['payload']) LOG.debug('Value "%s" found.', value) return value def request(self, rtype, param=None): request_id = '{0:08x}'.format(random.randint(0, 0xffffffff)) message_body = ' '.join((request_id, rtype,)) if param: message_body += ' ' + base64.b64encode(param.encode()).decode() msg = 'V2 {0} {1} {2}\n'.format( len(message_body), self._checksum(message_body), message_body) LOG.debug('Writing "%s" to metadata transport.', msg) need_close = False if not self.fp: self.open_transport() need_close = True self.fp.write(msg.encode('ascii')) self.fp.flush() response = bytearray() response.extend(self.fp.read(1)) while response[-1:] != b'\n': response.extend(self.fp.read(1)) if need_close: self.close_transport() response = response.rstrip().decode('ascii') LOG.debug('Read "%s" from metadata transport.', 
response) if 'SUCCESS' not in response: return None value = self._get_value_from_frame(request_id, response) return value def get(self, key, default=None, strip=False): result = self.request(rtype='GET', param=key) if result is None: return default if result and strip: result = result.strip() return result def get_json(self, key, default=None): result = self.get(key, default=default) if result is None: return default return json.loads(result) def list(self): result = self.request(rtype='KEYS') if result: result = result.split('\n') return result def put(self, key, val): param = b' '.join([base64.b64encode(i.encode()) for i in (key, val)]).decode() return self.request(rtype='PUT', param=param) def delete(self, key): return self.request(rtype='DELETE', param=key) def close_transport(self): if self.fp: self.fp.close() self.fp = None def __enter__(self): if self.fp: return self self.open_transport() return self def __exit__(self, exc_type, exc_value, traceback): self.close_transport() return def open_transport(self): raise NotImplementedError class JoyentMetadataSocketClient(JoyentMetadataClient): def __init__(self, socketpath, smartos_type=SMARTOS_ENV_LX_BRAND): super(JoyentMetadataSocketClient, self).__init__(smartos_type) self.socketpath = socketpath def open_transport(self): sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.connect(self.socketpath) self.fp = sock.makefile('rwb') def exists(self): return os.path.exists(self.socketpath) def __repr__(self): return "%s(socketpath=%s)" % (self.__class__.__name__, self.socketpath) class JoyentMetadataSerialClient(JoyentMetadataClient): def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM): super(JoyentMetadataSerialClient, self).__init__(smartos_type) self.device = device self.timeout = timeout def exists(self): return os.path.exists(self.device) def open_transport(self): ser = serial.Serial(self.device, timeout=self.timeout) if not ser.isOpen(): raise SystemError("Unable to open %s" % self.device) self.fp = ser def __repr__(self): return "%s(device=%s, timeout=%s)" % ( self.__class__.__name__, self.device, self.timeout) class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient): """V1 of the protocol was not safe for all values. Thus, we allowed the user to pass values in as base64 encoded. Users may still reasonably expect to be able to send base64 data and have it transparently decoded. So even though the V2 format is now used, and is safe (using base64 itself), we keep legacy support. The way for a user to do this was: a.) specify 'base64_keys' key whose value is a comma delimited list of keys that were base64 encoded. b.) base64_all: string interpreted as a boolean that indicates if all keys are base64 encoded. c.) 
set a key named b64-<keyname> with a boolean indicating that <keyname> is base64 encoded.""" def __init__(self, device, timeout=10, smartos_type=None): s = super(JoyentMetadataLegacySerialClient, self) s.__init__(device, timeout, smartos_type) self.base64_keys = None self.base64_all = None def _init_base64_keys(self, reset=False): if reset: self.base64_keys = None self.base64_all = None keys = None if self.base64_all is None: keys = self.list() if 'base64_all' in keys: self.base64_all = util.is_true(self._get("base64_all")) else: self.base64_all = False if self.base64_all: # short circuit if base64_all is true return if self.base64_keys is None: if keys is None: keys = self.list() b64_keys = set() if 'base64_keys' in keys: b64_keys = set(self._get("base64_keys").split(",")) # now add any b64-<keyname> that has a true value for key in [k[3:] for k in keys if k.startswith("b64-")]: if util.is_true(self._get(key)): b64_keys.add(key) else: if key in b64_keys: b64_keys.remove(key) self.base64_keys = b64_keys def _get(self, key, default=None, strip=False): return (super(JoyentMetadataLegacySerialClient, self). get(key, default=default, strip=strip)) def is_b64_encoded(self, key, reset=False): if key in NO_BASE64_DECODE: return False self._init_base64_keys(reset=reset) if self.base64_all: return True return key in self.base64_keys def get(self, key, default=None, strip=False): mdefault = object() val = self._get(key, strip=False, default=mdefault) if val is mdefault: return default if self.is_b64_encoded(key): try: val = base64.b64decode(val.encode()).decode() # Bogus input produces different errors in Python 2 and 3 except (TypeError, binascii.Error): LOG.warn("Failed base64 decoding key '%s': %s", key, val) if strip: val = val.strip() return val def jmc_client_factory( smartos_type=None, metadata_sockfile=METADATA_SOCKFILE, serial_device=SERIAL_DEVICE, serial_timeout=SERIAL_TIMEOUT, uname_version=None): if smartos_type is None: smartos_type = get_smartos_environ(uname_version) if smartos_type is None: return None elif smartos_type == SMARTOS_ENV_KVM: return JoyentMetadataLegacySerialClient( device=serial_device, timeout=serial_timeout, smartos_type=smartos_type) elif smartos_type == SMARTOS_ENV_LX_BRAND: return JoyentMetadataSocketClient(socketpath=metadata_sockfile, smartos_type=smartos_type) raise ValueError("Unknown value for smartos_type: %s" % smartos_type) def write_boot_content(content, content_f, link=None, shebang=False, mode=0o400): """ Write the content to content_f. Under the following rules: 1. If no content, remove the file 2. Write the content 3. If executable and no file magic, add it 4. If there is a link, create it @param content: what to write @param content_f: the file name @param backup_d: the directory to save the backup at @param link: if defined, location to create a symlink to @param shebang: if no file magic, set shebang @param mode: file mode Becuase of the way that Cloud-init executes scripts (no shell), a script will fail to execute if does not have a magic bit (shebang) set for the file. If shebang=True, then the script will be checked for a magic bit and to the SmartOS default of assuming that bash. 
""" if not content and os.path.exists(content_f): os.unlink(content_f) if link and os.path.islink(link): os.unlink(link) if not content: return util.write_file(content_f, content, mode=mode) if shebang and not content.startswith("#!"): try: cmd = ["file", "--brief", "--mime-type", content_f] (f_type, _err) = util.subp(cmd) LOG.debug("script %s mime type is %s", content_f, f_type) if f_type.strip() == "text/plain": new_content = "\n".join(["#!/bin/bash", content]) util.write_file(content_f, new_content, mode=mode) LOG.debug("added shebang to file %s", content_f) except Exception as e: util.logexc(LOG, ("Failed to identify script type for %s" % content_f, e)) if link: try: if os.path.islink(link): os.unlink(link) if content and os.path.exists(content_f): util.ensure_dir(os.path.dirname(link)) os.symlink(content_f, link) except IOError as e: util.logexc(LOG, "failed establishing content link: %s", e) def get_smartos_environ(uname_version=None, product_name=None): uname = os.uname() # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but # report 'BrandZ virtual linux' as the kernel version if uname_version is None: uname_version = uname[3] if uname_version.lower() == 'brandz virtual linux': return SMARTOS_ENV_LX_BRAND if product_name is None: system_type = util.read_dmi_data("system-product-name") else: system_type = product_name if system_type and 'smartdc' in system_type.lower(): return SMARTOS_ENV_KVM return None # Convert SMARTOS 'sdc:nics' data to network_config yaml def convert_smartos_network_data(network_data=None, dns_servers=None, dns_domain=None): """Return a dictionary of network_config by parsing provided SMARTOS sdc:nics configuration data sdc:nics data is a dictionary of properties of a nic and the ip configuration desired. Additional nic dictionaries are appended to the list. Converting the format is straightforward though it does include duplicate information as well as data which appears to be relevant to the hostOS rather than the guest. For each entry in the nics list returned from query sdc:nics, we create a type: physical entry, and extract the interface properties: 'mac' -> 'mac_address', 'mtu', 'interface' -> 'name'. The remaining keys are related to ip configuration. For each ip in the 'ips' list we create a subnet entry under 'subnets' pairing the ip to a one in the 'gateways' list. """ valid_keys = { 'physical': [ 'mac_address', 'mtu', 'name', 'params', 'subnets', 'type', ], 'subnet': [ 'address', 'broadcast', 'dns_nameservers', 'dns_search', 'metric', 'pointopoint', 'routes', 'scope', 'type', ], } if dns_servers: if not isinstance(dns_servers, (list, tuple)): dns_servers = [dns_servers] else: dns_servers = [] if dns_domain: if not isinstance(dns_domain, (list, tuple)): dns_domain = [dns_domain] else: dns_domain = [] def is_valid_ipv4(addr): return '.' 
in addr def is_valid_ipv6(addr): return ':' in addr pgws = { 'ipv4': {'match': is_valid_ipv4, 'gw': None}, 'ipv6': {'match': is_valid_ipv6, 'gw': None}, } config = [] for nic in network_data: cfg = dict((k, v) for k, v in nic.items() if k in valid_keys['physical']) cfg.update({ 'type': 'physical', 'name': nic['interface']}) if 'mac' in nic: cfg.update({'mac_address': nic['mac']}) subnets = [] for ip in nic.get('ips', []): if ip == "dhcp": subnet = {'type': 'dhcp4'} else: subnet = dict((k, v) for k, v in nic.items() if k in valid_keys['subnet']) subnet.update({ 'type': 'static', 'address': ip, }) proto = 'ipv4' if is_valid_ipv4(ip) else 'ipv6' # Only use gateways for 'primary' nics if 'primary' in nic and nic.get('primary', False): # the ips and gateways list may be N to M, here # we map the ip index into the gateways list, # and handle the case that we could have more ips # than gateways. we only consume the first gateway if not pgws[proto]['gw']: gateways = [gw for gw in nic.get('gateways', []) if pgws[proto]['match'](gw)] if len(gateways): pgws[proto]['gw'] = gateways[0] subnet.update({'gateway': pgws[proto]['gw']}) subnets.append(subnet) cfg.update({'subnets': subnets}) config.append(cfg) if dns_servers: config.append( {'type': 'nameserver', 'address': dns_servers, 'search': dns_domain}) return {'version': 1, 'config': config} # Used to match classes to dependencies datasources = [ (DataSourceSmartOS, (sources.DEP_FILESYSTEM, )), ] # Return a list of data sources that match this set of dependencies def get_datasource_list(depends): return sources.list_from_depends(depends, datasources) if __name__ == "__main__": import sys jmc = jmc_client_factory() if jmc is None: print("Do not appear to be on smartos.") sys.exit(1) if len(sys.argv) == 1: keys = (list(SMARTOS_ATTRIB_JSON.keys()) + list(SMARTOS_ATTRIB_MAP.keys()) + ['network_config']) else: keys = sys.argv[1:] def load_key(client, key, data): if key in data: return data[key] if key in SMARTOS_ATTRIB_JSON: keyname = SMARTOS_ATTRIB_JSON[key] data[key] = client.get_json(keyname) elif key == "network_config": for depkey in ('network-data', 'dns_servers', 'dns_domain'): load_key(client, depkey, data) data[key] = convert_smartos_network_data( network_data=data['network-data'], dns_servers=data['dns_servers'], dns_domain=data['dns_domain']) else: if key in SMARTOS_ATTRIB_MAP: keyname, strip = SMARTOS_ATTRIB_MAP[key] else: keyname, strip = (key, False) data[key] = client.get(keyname, strip=strip) return data[key] data = {} for key in keys: load_key(client=jmc, key=key, data=data) print(json.dumps(data, indent=1, sort_keys=True, separators=(',', ': ')))<|fim▁end|>
if md['user-data']:
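# Frame construction for the Joyent metadata protocol V2, matching the
# request() method in DataSourceSmartOS.py above: the body is
# "<request_id> <type>[ <base64 param>]" and the frame is
# "V2 <len(body)> <crc32(body) as 8 hex digits> <body>\n". Standalone sketch:
import base64
import binascii

def v2_frame(request_id, rtype, param=None):
    body = '%s %s' % (request_id, rtype)
    if param:
        body += ' ' + base64.b64encode(param.encode()).decode()
    checksum = '{0:08x}'.format(binascii.crc32(body.encode('utf-8')) & 0xffffffff)
    return 'V2 {0} {1} {2}\n'.format(len(body), checksum, body)

print(v2_frame('0000abcd', 'GET', 'sdc:uuid'))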
<|file_name|>DocetDocFormat.java<|end_file_name|><|fim▁begin|><|fim▁hole|>
 * Licensed to Diennea S.r.l. under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Diennea S.r.l. licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package docet.engine;

import java.util.Arrays;

/**
 *
 *
 */
public enum DocetDocFormat {

    TYPE_HTML("html", false),
    TYPE_PDF("pdf", true);

    private String name;
    private boolean includeResources;

    private DocetDocFormat(final String name, final boolean includeResources) {
        this.name = name;
        this.includeResources = includeResources;
    }

    @Override
    public String toString() {
        return this.name;
    }

    public boolean isIncludeResources() {
        return this.includeResources;
    }

    public static DocetDocFormat parseDocetRequestByName(final String name) {
        return Arrays.asList(DocetDocFormat.values())
                .stream()
                .filter(req -> req.toString().equals(name)).findFirst().orElse(null);
    }
}<|fim▁end|>
/*
<|file_name|>DtoConverter.java<|end_file_name|><|fim▁begin|>/******************************************************************************* * Copyright (c) 2012-2016 Codenvy, S.A. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html * * Contributors: * Codenvy, S.A. - initial API and implementation *******************************************************************************/ package org.eclipse.che.api.project.server; import org.eclipse.che.api.core.ServerException; import org.eclipse.che.api.core.model.project.SourceStorage; import org.eclipse.che.api.core.model.project.type.Attribute; import org.eclipse.che.api.core.rest.shared.dto.Link; import org.eclipse.che.api.core.util.LinksHelper; import org.eclipse.che.api.project.server.importer.ProjectImporter; import org.eclipse.che.api.project.server.type.ProjectTypeDef; import org.eclipse.che.api.project.shared.dto.AttributeDto; import org.eclipse.che.api.project.shared.dto.ItemReference; import org.eclipse.che.api.project.shared.dto.ProjectImporterDescriptor; import org.eclipse.che.api.project.shared.dto.ProjectTypeDto; import org.eclipse.che.api.project.shared.dto.ValueDto; import org.eclipse.che.api.workspace.shared.dto.ProjectConfigDto; import org.eclipse.che.api.workspace.shared.dto.ProjectProblemDto; import org.eclipse.che.api.workspace.shared.dto.SourceStorageDto; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.UriBuilder; import java.util.ArrayList; import java.util.LinkedList; import java.util.List; import java.util.stream.Collectors; import static javax.ws.rs.HttpMethod.DELETE; import static javax.ws.rs.HttpMethod.GET; import static javax.ws.rs.HttpMethod.PUT; import static javax.ws.rs.core.MediaType.APPLICATION_JSON; import static org.eclipse.che.api.project.server.Constants.LINK_REL_CHILDREN; import static org.eclipse.che.api.project.server.Constants.LINK_REL_DELETE; import static org.eclipse.che.api.project.server.Constants.LINK_REL_GET_CONTENT; import static org.eclipse.che.api.project.server.Constants.LINK_REL_TREE; import static org.eclipse.che.api.project.server.Constants.LINK_REL_UPDATE_CONTENT; import static org.eclipse.che.api.project.server.Constants.LINK_REL_UPDATE_PROJECT; import static org.eclipse.che.dto.server.DtoFactory.newDto; /** * Helper methods for convert server essentials to DTO and back. 
* * @author andrew00x */ public class DtoConverter { private DtoConverter() { } public static ProjectTypeDto toTypeDefinition(ProjectTypeDef projectType) { final ProjectTypeDto definition = newDto(ProjectTypeDto.class).withId(projectType.getId()) .withDisplayName(projectType.getDisplayName()) .withPrimaryable(projectType.isPrimaryable()) .withMixable(projectType.isMixable()) .withAncestors(projectType.getAncestors()); final List<AttributeDto> typeAttributes = new ArrayList<>(); for (Attribute attr : projectType.getAttributes()) { ValueDto valueDto = newDto(ValueDto.class); if (attr.getValue() != null) { valueDto.withList(attr.getValue().getList()); } typeAttributes.add(newDto(AttributeDto.class).withName(attr.getName()) .withDescription(attr.getDescription()) .withRequired(attr.isRequired()) .withVariable(attr.isVariable()) .withValue(valueDto)); } definition.withAttributes(typeAttributes).withParents(projectType.getParents()); return definition; } public static ProjectImporterDescriptor toImporterDescriptor(ProjectImporter importer) { return newDto(ProjectImporterDescriptor.class).withId(importer.getId()) .withInternal(importer.isInternal()) .withDescription(importer.getDescription() != null ? importer.getDescription() : "description not found") .withCategory(importer.getCategory().getValue()); } public static ItemReference toItemReference(FileEntry file, String workspace, UriBuilder uriBuilder) throws ServerException { return newDto(ItemReference.class).withName(file.getName()) .withPath(file.getPath().toString()) .withType("file") .withAttributes(file.getAttributes()) .withModified(file.getModified()) .withContentLength(file.getVirtualFile().getLength()) .withLinks(generateFileLinks(file, workspace, uriBuilder)); } public static ItemReference toItemReference(FolderEntry folder, String workspace, UriBuilder uriBuilder) { return newDto(ItemReference.class).withName(folder.getName()) .withPath(folder.getPath().toString()) .withType(folder.isProject() ? "project" : "folder") .withAttributes(folder.getAttributes()) .withModified(folder.getModified()) .withLinks(generateFolderLinks(folder, workspace, uriBuilder)); } /** * The method tries to provide as much as possible information about project.If get error then save information about error * with 'problems' field in ProjectConfigDto. 
* * @param project * project from which we need get information * @param serviceUriBuilder * service for building URI * @return an instance of {@link ProjectConfigDto} */ public static ProjectConfigDto toProjectConfig(RegisteredProject project, String workspace, UriBuilder serviceUriBuilder) { ProjectConfigDto projectConfigDto = newDto(ProjectConfigDto.class); projectConfigDto.withName(project.getName()) .withPath(project.getPath()) .withDescription(project.getDescription()); List <String> mixins = project.getMixinTypes().keySet().stream().collect(Collectors.toList()); projectConfigDto.withMixins(mixins); projectConfigDto.withAttributes(project.getAttributes()); projectConfigDto.withType(project.getProjectType().getId()); projectConfigDto.withSource(toSourceDto(project.getSource())); for (RegisteredProject.Problem p : project.getProblems()) { ProjectProblemDto projectProblem = newDto(ProjectProblemDto.class).withCode(p.code).withMessage(p.message); projectConfigDto.getProblems().add(projectProblem); } if (serviceUriBuilder != null) { projectConfigDto.withLinks(generateProjectLinks(project, workspace, serviceUriBuilder)); } return projectConfigDto; } private static SourceStorageDto toSourceDto(SourceStorage sourceStorage) { SourceStorageDto storageDto = newDto(SourceStorageDto.class); if (sourceStorage != null) { storageDto.withType(sourceStorage.getType()) .withLocation(sourceStorage.getLocation()) .withParameters(sourceStorage.getParameters()); } return storageDto; } private static List<Link> generateProjectLinks(RegisteredProject project, String workspace, UriBuilder uriBuilder) { final List<Link> links = new LinkedList<>(); if (project.getBaseFolder() != null) { //here project can be not imported so base directory not exist on file system but project exist in workspace config links.addAll(generateFolderLinks(project.getBaseFolder(), workspace, uriBuilder)); } final String relPath = project.getPath().substring(1); links.add(LinksHelper.createLink(PUT, uriBuilder.clone() .path(ProjectService.class, "updateProject") .build(workspace, relPath) .toString(), APPLICATION_JSON, APPLICATION_JSON, LINK_REL_UPDATE_PROJECT )); return links; } private static List<Link> generateFolderLinks(FolderEntry folder, String workspace, UriBuilder uriBuilder) { final List<Link> links = new LinkedList<>(); final String relPath = folder.getPath().toString().substring(1); // links.add(LinksHelper.createLink(GET, // uriBuilder.clone().path(ProjectService.class, "exportZip").build(workspace, relPath).toString(), // ExtMediaType.APPLICATION_ZIP, LINK_REL_EXPORT_ZIP)); links.add(LinksHelper.createLink(GET, uriBuilder.clone().path(ProjectService.class, "getChildren").build(workspace, relPath).toString(), APPLICATION_JSON, LINK_REL_CHILDREN)); links.add(LinksHelper.createLink(GET, uriBuilder.clone().path(ProjectService.class, "getTree").build(workspace, relPath).toString(), null, APPLICATION_JSON, LINK_REL_TREE) ); links.add(LinksHelper.createLink(DELETE, uriBuilder.clone().path(ProjectService.class, "delete").build(workspace, relPath).toString(),<|fim▁hole|> LINK_REL_DELETE)); return links; } private static List<Link> generateFileLinks(FileEntry file, String workspace, UriBuilder uriBuilder) { final List<Link> links = new LinkedList<>(); final String relPath = file.getPath().toString().substring(1); links.add(LinksHelper.createLink(GET, uriBuilder.clone().path(ProjectService.class, "getFile").build(workspace, relPath).toString(), null, null, LINK_REL_GET_CONTENT)); links.add(LinksHelper.createLink(PUT, 
uriBuilder.clone().path(ProjectService.class, "updateFile").build(workspace, relPath).toString(), MediaType.WILDCARD, null, LINK_REL_UPDATE_CONTENT)); links.add(LinksHelper.createLink(DELETE, uriBuilder.clone().path(ProjectService.class, "delete").build(workspace, relPath).toString(), LINK_REL_DELETE)); return links; } }<|fim▁end|>
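Nothing visible follows <|fim▁end|> in the DtoConverter.java row before the next <|file_name|> header, so its completion is effectively empty; zero-length completions do occur in this dump. A small sanity check one might run over rows, sketched under the assumption that prompt and completion arrive as plain strings (check_row is illustrative, not from any codebase shown here):

def check_row(prompt, completion):
    problems = []
    # every prompt should carry exactly one hole marker
    if prompt.count("<|fim▁hole|>") != 1:
        problems.append("expected exactly one <|fim▁hole|> marker")
    # empty completions are legal, but worth counting when profiling the data
    if completion == "":
        problems.append("zero-length completion")
    return problems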
<|file_name|>test.rs<|end_file_name|><|fim▁begin|>extern crate mio; extern crate bytes; #[macro_use] extern crate log; extern crate env_logger; extern crate tempdir; pub use ports::localhost; mod test_battery; mod test_close_on_drop; mod test_echo_server; mod test_multicast; mod test_notify; mod test_register_deregister; mod test_timer; mod test_udp_socket; mod test_unix_echo_server; mod ports { use std::net::SocketAddr; use std::str::FromStr; use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; use std::sync::atomic::Ordering::SeqCst; // Helper for getting a unique port for the task run<|fim▁hole|> // TODO: Reuse ports to not spam the system static mut NEXT_PORT: AtomicUsize = ATOMIC_USIZE_INIT; const FIRST_PORT: usize = 18080; fn next_port() -> usize { unsafe { // If the atomic was never used, set it to the initial port NEXT_PORT.compare_and_swap(0, FIRST_PORT, SeqCst); // Get and increment the port list NEXT_PORT.fetch_add(1, SeqCst) } } pub fn localhost() -> SocketAddr { let s = format!("127.0.0.1:{}", next_port()); FromStr::from_str(&s).unwrap() } } pub fn sleep_ms(ms: usize) { use std::thread; thread::sleep_ms(ms as u32); }<|fim▁end|>
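The test.rs prompt above hands out unique localhost ports for parallel tests: a process-wide atomic counter is lazily seeded with FIRST_PORT via compare_and_swap, and fetch_add then gives each caller the next port. The same scheme in Python, as a rough sketch only; itertools.count stands in for the atomic counter, which is adequate in CPython where next() on it is effectively atomic under the GIL:

import itertools

FIRST_PORT = 18080  # same starting port as the Rust helper
_next_port = itertools.count(FIRST_PORT)

def localhost():
    # give every caller a fresh 127.0.0.1:<port> address
    return "127.0.0.1:%d" % next(_next_port)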
<|file_name|>cleanClinTSV.py<|end_file_name|><|fim▁begin|># -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# import miscClin import tsvIO import sys # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# remapDict = {} DAYS_PER_YEAR = 365.2425 # ------------------------------------------------------------------------- ## # DEPENDING ON WHICH TUMOR TYPE IS BEING PROCESSED, THESE SWITCHES MAY # NEED TO BE CHANGED ... remapDict["anatomic_organ_subdivision"] = {} if (1): remapDict["anatomic_organ_subdivision"]["na"] = "NA" remapDict["anatomic_organ_subdivision"]["rectum"] = 0 remapDict["anatomic_organ_subdivision"]["rectosigmoid_junction"] = 1 remapDict["anatomic_organ_subdivision"]["sigmoid_colon"] = 2 remapDict["anatomic_organ_subdivision"]["descending_colon"] = 3 remapDict["anatomic_organ_subdivision"]["splenic_flexure"] = 4 remapDict["anatomic_organ_subdivision"]["transverse_colon"] = 5 remapDict["anatomic_organ_subdivision"]["hepatic_flexure"] = 6 remapDict["anatomic_organ_subdivision"]["ascending_colon"] = 7 remapDict["anatomic_organ_subdivision"]["cecum"] = 8 if (0): remapDict["anatomic_organ_subdivision"]["na"] = "NA" remapDict["anatomic_organ_subdivision"]["bilateral"] = "bilateral" remapDict["anatomic_organ_subdivision"]["left"] = "left" remapDict["anatomic_organ_subdivision"]["right"] = "right" if (0): remapDict["anatomic_organ_subdivision"][""] = "NA" remapDict["anatomic_organ_subdivision"]["na"] = "NA" remapDict["anatomic_organ_subdivision"]["brain"] = "brain" remapDict["histological_type"] = {} if (0): remapDict["histological_type"]["na"] = "NA" remapDict["histological_type"]["colon_adenocarcinoma"] = 0 remapDict["histological_type"]["rectal_adenocarcinoma"] = 0 remapDict["histological_type"]["colon_mucinous_adenocarcinoma"] = 1 remapDict["histological_type"]["rectal_mucinous_adenocarcinoma"] = 1 if (0): remapDict["histological_type"]["na"] = "NA" remapDict["histological_type"][ "untreated_primary_(de_novo)_gbm"] = "de_novo" remapDict["histological_type"]["treated_primary_gbm"] = "primary" remapDict["ethnicity"] = {} remapDict["ethnicity"]["hispanic_or_latino"] = "hispanic" remapDict["ethnicity"]["not_hispanic_or_latino"] = "not_hispanic" # ------------------------------------------------------------------------- ## remapDict["tumor_grade"] = {} remapDict["tumor_grade"]["na"] = "NA" remapDict["tumor_grade"]["gx"] = "NA" remapDict["tumor_grade"]["gb"] = "NA" remapDict["tumor_grade"]["g1"] = 1 remapDict["tumor_grade"]["g2"] = 2 remapDict["tumor_grade"]["g3"] = 3 remapDict["tumor_grade"]["g4"] = 4 remapDict["tumor_grade"]["high grade"] = 3 # ??? remapDict["tumor_grade"]["high_grade"] = 3 # ??? 
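# Illustration only, nothing below is executed: remapCategoricalFeatures
# (defined further down in this file) pushes every value of a matching
# clinical column through the table above, after lower-casing it and
# stripping any leading "stage_" / "stage " prefix.  So a tumor_grade
# column behaves roughly like this:
#
#     tmpV = ["g2", "gx", "high_grade"]
#     newV = [remapDict["tumor_grade"][v.lower()] for v in tmpV]
#     # --> [2, "NA", 3]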
if (0): remapDict["tumor_stage"] = {} remapDict["tumor_stage"]["na"] = "NA" remapDict["tumor_stage"]["i"] = 1 remapDict["tumor_stage"]["ia"] = 1.2 remapDict["tumor_stage"]["ib"] = 1.4 remapDict["tumor_stage"]["ic"] = 1.6 remapDict["tumor_stage"]["ii"] = 2 remapDict["tumor_stage"]["iia"] = 2.2 remapDict["tumor_stage"]["iib"] = 2.4 remapDict["tumor_stage"]["iic"] = 2.6 remapDict["tumor_stage"]["iii"] = 3 remapDict["tumor_stage"]["iiia"] = 3.2 remapDict["tumor_stage"]["iiib"] = 3.4 remapDict["tumor_stage"]["iiic"] = 3.6 remapDict["tumor_stage"]["iv"] = 4 remapDict["tumor_stage"]["iva"] = 4.2 remapDict["tumor_stage"]["ivb"] = 4.4 remapDict["tumor_stage"]["ivc"] = 4.6 remapDict["breast_tumor_pathologic_grouping_stage"] = {} remapDict["breast_tumor_pathologic_grouping_stage"]["na"] = "NA" remapDict["breast_tumor_pathologic_grouping_stage"]["x"] = "NA" remapDict["breast_tumor_pathologic_grouping_stage"]["tis"] = 0.5 remapDict["breast_tumor_pathologic_grouping_stage"]["i"] = 1 remapDict["breast_tumor_pathologic_grouping_stage"]["ia"] = 1.2 remapDict["breast_tumor_pathologic_grouping_stage"]["ib"] = 1.4 remapDict["breast_tumor_pathologic_grouping_stage"]["ii"] = 2 remapDict["breast_tumor_pathologic_grouping_stage"]["iia"] = 2.2 remapDict["breast_tumor_pathologic_grouping_stage"]["iib"] = 2.4 remapDict["breast_tumor_pathologic_grouping_stage"]["iic"] = 2.6 remapDict["breast_tumor_pathologic_grouping_stage"]["iii"] = 3 remapDict["breast_tumor_pathologic_grouping_stage"]["iiia"] = 3.2 remapDict["breast_tumor_pathologic_grouping_stage"]["iiib"] = 3.4 remapDict["breast_tumor_pathologic_grouping_stage"]["iiic"] = 3.6 remapDict["breast_tumor_pathologic_grouping_stage"]["iv"] = 4 remapDict["primary_tumor_pathologic_spread"] = {} remapDict["primary_tumor_pathologic_spread"]["na"] = "NA" remapDict["primary_tumor_pathologic_spread"]["tx"] = "NA" remapDict["primary_tumor_pathologic_spread"]["t0"] = 0 remapDict["primary_tumor_pathologic_spread"]["tis"] = 0.5 remapDict["primary_tumor_pathologic_spread"]["t1"] = 1 remapDict["primary_tumor_pathologic_spread"]["t1a"] = 1.2 remapDict["primary_tumor_pathologic_spread"]["t1b"] = 1.4 remapDict["primary_tumor_pathologic_spread"]["t2"] = 2 remapDict["primary_tumor_pathologic_spread"]["t2a"] = 2.2 remapDict["primary_tumor_pathologic_spread"]["t2b"] = 2.4 remapDict["primary_tumor_pathologic_spread"]["t3"] = 3 remapDict["primary_tumor_pathologic_spread"]["t3a"] = 3.2 remapDict["primary_tumor_pathologic_spread"]["t3b"] = 3.4 remapDict["primary_tumor_pathologic_spread"]["t3c"] = 3.6 remapDict["primary_tumor_pathologic_spread"]["t4"] = 4 remapDict["primary_tumor_pathologic_spread"]["t4a"] = 4.2 remapDict["primary_tumor_pathologic_spread"]["t4b"] = 4.4 remapDict["breast_tumor_pathologic_t_stage"] = {} remapDict["breast_tumor_pathologic_t_stage"]["na"] = "NA" remapDict["breast_tumor_pathologic_t_stage"]["tx"] = "NA" remapDict["breast_tumor_pathologic_t_stage"]["t1"] = 1 remapDict["breast_tumor_pathologic_t_stage"]["t1a"] = 1.2 remapDict["breast_tumor_pathologic_t_stage"]["t1b"] = 1.4 remapDict["breast_tumor_pathologic_t_stage"]["t1c"] = 1.6 remapDict["breast_tumor_pathologic_t_stage"]["t2"] = 2 remapDict["breast_tumor_pathologic_t_stage"]["t2a"] = 2.2 remapDict["breast_tumor_pathologic_t_stage"]["t2b"] = 2.4 remapDict["breast_tumor_pathologic_t_stage"]["t2c"] = 2.6 remapDict["breast_tumor_pathologic_t_stage"]["t3"] = 3 remapDict["breast_tumor_pathologic_t_stage"]["t3a"] = 3.4 remapDict["breast_tumor_pathologic_t_stage"]["t3b"] = 3.4 
remapDict["breast_tumor_pathologic_t_stage"]["t3c"] = 3.6 remapDict["breast_tumor_pathologic_t_stage"]["t4"] = 4 remapDict["breast_tumor_pathologic_t_stage"]["t4a"] = 4.2 remapDict["breast_tumor_pathologic_t_stage"]["t4b"] = 4.4 remapDict["breast_tumor_pathologic_t_stage"]["t4c"] = 4.6 remapDict["breast_tumor_pathologic_t_stage"]["t4d"] = 4.8 remapDict["breast_carcinoma_estrogen_receptor_status"] = {} remapDict["breast_carcinoma_estrogen_receptor_status"]["na"] = "NA" remapDict["breast_carcinoma_estrogen_receptor_status"]["not_performed"] = "NA" remapDict["breast_carcinoma_estrogen_receptor_status"][ "performed_but_not_available"] = "NA" remapDict["breast_carcinoma_estrogen_receptor_status"][ "indeterminate"] = "indeterminate" remapDict["breast_carcinoma_estrogen_receptor_status"]["positive"] = "positive" remapDict["breast_carcinoma_estrogen_receptor_status"]["negative"] = "negative" remapDict["lymphnode_pathologic_spread"] = {} remapDict["lymphnode_pathologic_spread"]["na"] = "NA" remapDict["lymphnode_pathologic_spread"]["nx"] = "NA" remapDict["lymphnode_pathologic_spread"]["n0"] = 0 remapDict["lymphnode_pathologic_spread"]["n1"] = 1 remapDict["lymphnode_pathologic_spread"]["n1a"] = 1.2 remapDict["lymphnode_pathologic_spread"]["n1b"] = 1.4 remapDict["lymphnode_pathologic_spread"]["n1c"] = 1.6 remapDict["lymphnode_pathologic_spread"]["n2"] = 2 remapDict["lymphnode_pathologic_spread"]["n2a"] = 2.2 remapDict["lymphnode_pathologic_spread"]["n2b"] = 2.4 remapDict["lymphnode_pathologic_spread"]["n2c"] = 2.6 remapDict["lymphnode_pathologic_spread"]["n3"] = 3 remapDict["lymphnode_pathologic_spread"]["n3a"] = 3.2 remapDict["breast_tumor_pathologic_n_stage"] = {} remapDict["breast_tumor_pathologic_n_stage"]["na"] = "NA" remapDict["breast_tumor_pathologic_n_stage"]["pnx"] = "NA" remapDict["breast_tumor_pathologic_n_stage"]["pn0"] = 0 remapDict["breast_tumor_pathologic_n_stage"]["pn0(i-)"] = 0.2 remapDict["breast_tumor_pathologic_n_stage"]["pn0(i+)"] = 0.4 remapDict["breast_tumor_pathologic_n_stage"]["pn1"] = 1 remapDict["breast_tumor_pathologic_n_stage"]["pn1mi"] = 1.1 remapDict["breast_tumor_pathologic_n_stage"]["pn1a"] = 1.2 remapDict["breast_tumor_pathologic_n_stage"]["pn1b"] = 1.4 remapDict["breast_tumor_pathologic_n_stage"]["pn1c"] = 1.6 remapDict["breast_tumor_pathologic_n_stage"]["pn2"] = 2 remapDict["breast_tumor_pathologic_n_stage"]["pn2a"] = 2.2 remapDict["breast_tumor_pathologic_n_stage"]["pn2b"] = 2.4 remapDict["breast_tumor_pathologic_n_stage"]["pn3"] = 3 remapDict["breast_tumor_pathologic_n_stage"]["pn3a"] = 3.2 remapDict["breast_tumor_pathologic_n_stage"]["pn3b"] = 3.4 remapDict["breast_tumor_pathologic_n_stage"]["pn3c"] = 3.6 remapDict["breast_tumor_pathologic_n_stage"] = {} remapDict["breast_tumor_pathologic_n_stage"]["na"] = "NA" remapDict["breast_tumor_pathologic_n_stage"]["nx"] = "NA" remapDict["breast_tumor_pathologic_n_stage"]["n0"] = 0 remapDict["breast_tumor_pathologic_n_stage"]["n0(i-)"] = 0.2 remapDict["breast_tumor_pathologic_n_stage"]["n0_(i-)"] = 0.2 remapDict["breast_tumor_pathologic_n_stage"]["n0(i+)"] = 0.4 remapDict["breast_tumor_pathologic_n_stage"]["n0_(i+)"] = 0.4 remapDict["breast_tumor_pathologic_n_stage"]["n0_(mol+)"] = 0.3 remapDict["breast_tumor_pathologic_n_stage"]["n1"] = 1 remapDict["breast_tumor_pathologic_n_stage"]["n1mi"] = 1.1 remapDict["breast_tumor_pathologic_n_stage"]["n1a"] = 1.2 remapDict["breast_tumor_pathologic_n_stage"]["n1b"] = 1.4 remapDict["breast_tumor_pathologic_n_stage"]["n1c"] = 1.6 remapDict["breast_tumor_pathologic_n_stage"]["n2"] = 2 
remapDict["breast_tumor_pathologic_n_stage"]["n2a"] = 2.2 remapDict["breast_tumor_pathologic_n_stage"]["n2b"] = 2.4 remapDict["breast_tumor_pathologic_n_stage"]["n3"] = 3 remapDict["breast_tumor_pathologic_n_stage"]["n3a"] = 3.2 remapDict["breast_tumor_pathologic_n_stage"]["n3b"] = 3.4 remapDict["breast_tumor_pathologic_n_stage"]["n3c"] = 3.6 remapDict["distant_metastasis_pathologic_spread"] = {} remapDict["distant_metastasis_pathologic_spread"]["na"] = "NA" remapDict["distant_metastasis_pathologic_spread"]["mx"] = "NA" remapDict["distant_metastasis_pathologic_spread"]["m0"] = 0 remapDict["distant_metastasis_pathologic_spread"]["m1"] = 1 remapDict["distant_metastasis_pathologic_spread"]["m1a"] = 1.2 remapDict["distant_metastasis_pathologic_spread"]["m1b"] = 1.4 remapDict["breast_tumor_clinical_m_stage"] = {} remapDict["breast_tumor_clinical_m_stage"]["na"] = "NA" remapDict["breast_tumor_clinical_m_stage"]["mx"] = "NA" remapDict["breast_tumor_clinical_m_stage"]["cm0_(i+)"] = "NA" remapDict["breast_tumor_clinical_m_stage"]["m0"] = 0 remapDict["breast_tumor_clinical_m_stage"]["m1"] = 1 remapDict["breast_tumor_clinical_m_stage"]["m1a"] = 1.2 remapDict["breast_tumor_clinical_m_stage"]["m1b"] = 1.4 remapDict["residual_tumor"] = {} remapDict["residual_tumor"]["na"] = "NA" remapDict["residual_tumor"]["rx"] = "NA" remapDict["residual_tumor"]["r0"] = 0 remapDict["residual_tumor"]["r1"] = 1 remapDict["residual_tumor"]["r2"] = 2 remapDict["her2_immunohistochemistry_level_result"] = {} remapDict["her2_immunohistochemistry_level_result"]["na"] = "NA" remapDict["her2_immunohistochemistry_level_result"]["0"] = 0 remapDict["her2_immunohistochemistry_level_result"]["1+"] = 1 remapDict["her2_immunohistochemistry_level_result"]["2+"] = 2 remapDict["her2_immunohistochemistry_level_result"]["3+"] = 3 remapDict["breast_carcinoma_immunohistochemistry_pos_cell_score"] = {} remapDict["breast_carcinoma_immunohistochemistry_pos_cell_score"]["na"] = "NA" remapDict["breast_carcinoma_immunohistochemistry_pos_cell_score"]["0"] = 0 remapDict["breast_carcinoma_immunohistochemistry_pos_cell_score"]["1+"] = 1 remapDict["breast_carcinoma_immunohistochemistry_pos_cell_score"]["2+"] = 2 remapDict["breast_carcinoma_immunohistochemistry_pos_cell_score"]["3+"] = 3 remapDict["breast_carcinoma_immunohistochemistry_pos_cell_score"]["4+"] = 4 remapDict["immunohistochemistry_positive_cell_score"] = {} remapDict["immunohistochemistry_positive_cell_score"]["na"] = "NA" remapDict["immunohistochemistry_positive_cell_score"]["0"] = 0 remapDict["immunohistochemistry_positive_cell_score"]["1+"] = 1 remapDict["immunohistochemistry_positive_cell_score"]["2+"] = 2 remapDict["immunohistochemistry_positive_cell_score"]["3+"] = 3 remapDict["immunohistochemistry_positive_cell_score"]["4+"] = 4 remapDict["progesterone_receptor_level_cell_percent_category"] = {} remapDict["progesterone_receptor_level_cell_percent_category"]["na"] = "NA" remapDict["progesterone_receptor_level_cell_percent_category"]["<10%"] = 0 remapDict["progesterone_receptor_level_cell_percent_category"]["10-19%"] = 1 remapDict["progesterone_receptor_level_cell_percent_category"]["20-29%"] = 2 remapDict["progesterone_receptor_level_cell_percent_category"]["30-39%"] = 3 remapDict["progesterone_receptor_level_cell_percent_category"]["40-49%"] = 4 remapDict["progesterone_receptor_level_cell_percent_category"]["50-59%"] = 5 remapDict["progesterone_receptor_level_cell_percent_category"]["60-69%"] = 6 remapDict["progesterone_receptor_level_cell_percent_category"]["70-79%"] = 7 
remapDict["progesterone_receptor_level_cell_percent_category"]["80-89%"] = 8 remapDict["progesterone_receptor_level_cell_percent_category"]["90-99%"] = 9 remapDict["er_level_cell_percentage_category"] = {} remapDict["er_level_cell_percentage_category"]["na"] = "NA" remapDict["er_level_cell_percentage_category"]["<10%"] = 0 remapDict["er_level_cell_percentage_category"]["10-19%"] = 1 remapDict["er_level_cell_percentage_category"]["20-29%"] = 2 remapDict["er_level_cell_percentage_category"]["30-39%"] = 3 remapDict["er_level_cell_percentage_category"]["40-49%"] = 4 remapDict["er_level_cell_percentage_category"]["50-59%"] = 5 remapDict["er_level_cell_percentage_category"]["60-69%"] = 6 remapDict["er_level_cell_percentage_category"]["70-79%"] = 7 remapDict["er_level_cell_percentage_category"]["80-89%"] = 8 remapDict["er_level_cell_percentage_category"]["90-99%"] = 9 remapDict["her2_erbb_pos_finding_cell_percent_category"] = {} remapDict["her2_erbb_pos_finding_cell_percent_category"]["na"] = "NA" remapDict["her2_erbb_pos_finding_cell_percent_category"]["<10%"] = 0 remapDict["her2_erbb_pos_finding_cell_percent_category"]["10-19%"] = 1 remapDict["her2_erbb_pos_finding_cell_percent_category"]["20-29%"] = 2 remapDict["her2_erbb_pos_finding_cell_percent_category"]["30-39%"] = 3 remapDict["her2_erbb_pos_finding_cell_percent_category"]["40-49%"] = 4 remapDict["her2_erbb_pos_finding_cell_percent_category"]["50-59%"] = 5 remapDict["her2_erbb_pos_finding_cell_percent_category"]["60-69%"] = 6 remapDict["her2_erbb_pos_finding_cell_percent_category"]["70-79%"] = 7 remapDict["her2_erbb_pos_finding_cell_percent_category"]["80-89%"] = 8 remapDict["her2_erbb_pos_finding_cell_percent_category"]["90-99%"] = 9 remapDict["axillary_lymph_node_stage_method_type"] = {} remapDict["axillary_lymph_node_stage_method_type"]["na"] = "NA" remapDict["axillary_lymph_node_stage_method_type"]["OTHER_(SPECIFY)"] = "NA" remapDict["axillary_lymph_node_stage_method_type"]["other_(specify)"] = "NA" # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def stringInList_CaseInsens ( aString, aList ): for s in aList: u = s.upper() if ( aString.upper() == u ): return ( 1 ) return ( 0 ) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def remapCategoricalFeatures(allClinDict): print " " print " in remapCategoricalFeatures " keyList = allClinDict.keys() keyList.sort() for aKey in keyList: aKey2 = aKey.lower() if (aKey2 in remapDict.keys()): numRemap = 0 print " " print " looking at <%s> " % aKey2 tmpV = allClinDict[aKey] # print " part of original vector : ", tmpV[:10] newV = [0] * len(tmpV) for kk in range(len(tmpV)): bKey2 = tmpV[kk].lower() if (bKey2.startswith("stage_")): bKey2 = bKey2[6:] if (bKey2.startswith("stage ")): bKey2 = bKey2[6:] try: newV[kk] = remapDict[aKey2][bKey2] if (newV[kk] != "NA" and newV[kk] != "na"): if (newV[kk].lower() != bKey2): # print " remapping ... ", aKey, aKey2, kk, # bKey2, remapDict[aKey2][bKey2] numRemap += 1 except: if (0): print " WARNING in remapCategoricalFeatures ... nothing to remap to ??? 
" print " <%s> <%s> %d <%s> " % (aKey, aKey2, kk, bKey2) print " <%s> " % remapDict[aKey2] # sys.exit(-1) newV[kk] = bKey2 if (numRemap > 0): print " --> using remapped values for <%s> " % aKey print " mapping dictionary : ", remapDict[aKey2] print " part of original vector : ", tmpV[:10] print " part of new vector : ", newV[:10] allClinDict[aKey] = newV return (allClinDict) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def getNumPatients(allClinDict): aKey = allClinDict.keys()[0] return ( len(allClinDict[aKey]) ) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def findProperKey(allClinDict, aString): keyList = allClinDict.keys() foundList = [] for aKey in keyList: if ( aKey.lower().find(aString.lower()) >=0 ): foundList += [ aKey ] if ( len(foundList) == 0 ): return ( "NO KEY" ) elif ( len(foundList) == 1 ): return ( foundList[0] ) else: ## look for a perfect match ... for mString in foundList: mTokens = mString.split(':') if ( len(mTokens) == 1 ): if ( mTokens[0].lower() == aString.lower() ): return ( mString ) elif ( len(mTokens) > 2 ): try: if ( mTokens[2].lower() == aString.lower() ): return ( mString ) except: print " findProperKey: ERROR in try ??? ", mString print foundList print " " print " ERROR in findProperKey ??? multiple matches " print " but none of them are perfect matches ... " print aString print foundList print " " sys.exit(-1) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def computeLymphnodesFraction(allClinDict): aKey = findProperKey ( allClinDict, "number_of_lymphnodes_positive_by_he" ) bKey = findProperKey ( allClinDict, "number_of_lymphnodes_examined" ) if (aKey not in allClinDict.keys()): print " " print " skipping computeLymphnodesFraction " return (allClinDict) if (bKey not in allClinDict.keys()): print " " print " skipping computeLymphnodesFraction " return (allClinDict) print " " print " in computeLymphnodesFraction ... " numClin = getNumPatients(allClinDict) newV = [0] * numClin for kk in range(numClin): if (allClinDict[bKey][kk] == "NA"): newV[kk] = "NA" elif (allClinDict[aKey][kk] == "NA"): newV[kk] = "NA" elif (int(allClinDict[bKey][kk]) == 0): newV[kk] = "NA" else: newV[kk] = float(allClinDict[aKey][kk]) / \ float(allClinDict[bKey][kk]) allClinDict["N:SAMP:fraction_lymphnodes_positive_by_he:::::"] = newV return (allClinDict) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def addTag2Key ( aKey, aTag ): aTokens = aKey.split(':') if ( len(aTokens) >= 7 ): newKey = aTokens[0] + ':' + aTokens[1] + ':' + aTokens[2] if ( aTag[0] == "_" ): newKey += aTag else: newKey += "_" + aTag for ii in range(3,len(aTokens)): newKey += ":" + aTokens[ii] else: newKey = aKey if ( aTag[0] == "_" ): newKey += aTag else: newKey += "_" + aTag return ( newKey ) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def checkBarcodes(allClinDict): zKey = findProperKey (allClinDict, "bcr_patient_barcode" ) numClin = getNumPatients(allClinDict) for ii in range(numClin): if ( allClinDict[zKey][ii].find("_") >= 0 ): print " BAD barcode !!! 
", ii, allClinDict[zKey][ii] sys.exit(-1) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# # fields of interest: # days_to_birth # days_to_initial_pathologic_diagnosis <-- this is always 0 # days_to_submitted_specimen_dx # days_to_last_followup # days_to_last_known_alive # days_to_death # also: # new_tumor_event_after_initial_treatment # days_to_new_tumor_event_after_initial_treatment def addFollowupInfo(allClinDict): print " " print " in addFollowupInfo ... " # ------------------------------------------------------------------------ # FIRST: if there is a days_to_last_known_alive, then check that it is # used consistently, otherwise create it zKey = findProperKey (allClinDict, "bcr_patient_barcode") aKey = findProperKey (allClinDict, "days_to_last_known_alive") bKey = findProperKey (allClinDict, "days_to_last_followup") cKey = findProperKey (allClinDict, "days_to_death") haveA = (aKey in allClinDict.keys()) haveB = (bKey in allClinDict.keys()) haveC = (cKey in allClinDict.keys()) print " " print " STEP #1 " print " have flags A, B, and C : ", haveA, haveB, haveC ## print " allClinDict.keys() : " ## print allClinDict.keys() if (haveA): # if we have the "days_to_last_known_alive" field, check that it # is consistent with the other two fields ... numClin = getNumPatients(allClinDict) numNotNA = 0 for kk in range(numClin): ## if we have a 'days_to_death' field and it is not NA, then set 'days_to_last_known_alive' to that value if (haveC): if (str(allClinDict[cKey][kk]).upper() != "NA"): allClinDict[aKey][kk] = allClinDict[cKey][kk] ## if we have a 'days_to_last_followup' field and it is not NA, then ... if (haveB): if (str(allClinDict[bKey][kk]).upper() != "NA"): if (str(allClinDict[aKey][kk]).upper() == "NA"): allClinDict[aKey][kk] = allClinDict[bKey][kk] if (str(allClinDict[aKey][kk]).upper() != "NA"): numNotNA += 1 print " UPDATED days_to_last_known_alive (%d) : " % numNotNA print allClinDict[aKey] else: # create it ... 
if ( aKey == "NO KEY" ): aKey = "N:CLIN:days_to_last_known_alive:::::" numClin = getNumPatients(allClinDict) newVec = [0] * numClin numNotNA = 0 for kk in range(numClin): newVec[kk] = "NA" if (haveC): if (str(allClinDict[cKey][kk]).upper() != "NA"): newVec[kk] = allClinDict[cKey][kk] if (haveB): if (str(allClinDict[bKey][kk]).upper() != "NA"): if (str(newVec[kk]).upper() == "NA"): newVec[kk] = allClinDict[bKey][kk] if (str(newVec[kk]).upper() != "NA"): numNotNA += 1 print " NEW days_to_last_known_alive (%d) : " % numNotNA ## print newVec allClinDict[aKey] = newVec # ------------------------------------------------------------------------ # SECOND: if there is a "days_to_submitted_specimen_dx", then create # a set of "days_to_" features that instead of being relative # to "initial_pathologic_diagnosis" are relative to "submitted_specimen" print " " print " STEP #2 " aKey = findProperKey (allClinDict, "days_to_submitted_specimen_dx") tKey = findProperKey (allClinDict, "days_to_initial_pathologic_diagnosis") if (aKey in allClinDict.keys()): haveA = 1 else: print " do not have [days_to_submitted_specimen_dx] in allClinDict " haveA = 0 if (tKey in allClinDict.keys()): haveT = 1 else: print " do not have [days_to_initial_pathologic_diagnosis] in allClinDict " haveT = 0 try: numClin = getNumPatients(allClinDict) for bKey in allClinDict.keys(): if (haveA == 0): continue if (bKey == aKey): continue if (bKey.find("days_to_") >= 0): newKey = addTag2Key ( bKey, "relSS" ) print " --> making newKey <%s> from bKey <%s> [%d] " % (newKey, bKey, numClin) newVec = [0] * numClin numNotNA = 0 for kk in range(numClin): ## initialize to NA newVec[kk] = "NA" ## skip if an important value is NA if (str(allClinDict[aKey][kk]).upper() == "NA"): continue if (str(allClinDict[bKey][kk]).upper() == "NA"): continue if (haveT): if (str(allClinDict[tKey][kk]).upper() == "NA"): continue ## deltaDays is either (days_to_submitted_specimen_dx) - (days_to_initial_pathologic_diagnosis) ## or just (days_to_submitted_specimen_dx) if (haveT): deltaDays = allClinDict[aKey][kk] - allClinDict[tKey][kk] else: deltaDays = allClinDict[aKey][kk] ## and then we subtract 'delta days' from the original key to make the new relative key newVec[kk] = allClinDict[bKey][kk] - deltaDays print " STEP2a ", kk, allClinDict[zKey][kk], allClinDict[bKey][kk], allClinDict[aKey][kk], deltaDays, newVec[kk] numNotNA += 1 if ( numNotNA > 30 ): print " adding new key (%d) : " % numNotNA, newKey ## print newVec[:5] ## print newVec[-5:] allClinDict[newKey] = newVec else: print " NOT adding new key (%d) : ", numNotNA, newKey if (bKey.find("age_at_") >= 0): ## make sure that this is not a "stage_at_" feature !!! 
if ( bKey.find("stage_at_") >= 0 ): continue newKey = addTag2Key ( bKey, "relSS" ) print " --> making newKey <%s> from bKey <%s> [%d] " % (newKey, bKey, numClin) newVec = [0] * numClin numNotNA = 0 for kk in range(numClin): ## initialize to NA newVec[kk] = "NA" ## skip if an important value is NA if (str(allClinDict[aKey][kk]).upper() == "NA"): continue if (str(allClinDict[bKey][kk]).upper() == "NA"): continue if (haveT): if (str(allClinDict[tKey][kk]).upper() == "NA"): continue ## deltaDays is either (days_to_submitted_specimen_dx) - (days_to_initial_pathologic_diagnosis) ## or just (days_to_submitted_specimen_dx) if (haveT): deltaDays = allClinDict[aKey][kk] - allClinDict[tKey][kk] else: deltaDays = allClinDict[aKey][kk] ## and then we subtract 'delta days' from the original key to make the new relative key ## 04mar14 : actually we need to ADD here because "age" should go UP with deltaDays ... newVec[kk] = allClinDict[bKey][kk] + ( float(deltaDays) / DAYS_PER_YEAR ) print " STEP2b ", kk, allClinDict[zKey][kk], allClinDict[bKey][kk], allClinDict[aKey][kk], deltaDays, newVec[kk] numNotNA += 1 if ( numNotNA > 30 ): print " adding new key (%d) : " % numNotNA, newKey ## print newVec[:5] ## print newVec[-5:] allClinDict[newKey] = newVec else: print " NOT adding new key (%d) : ", numNotNA, newKey except: print " --> failed in this try (x) " doNothing = 1 # ------------------------------------------------------------------------ # THIRD: if there is a "days_to_sample_procurement", then create # a set of "days_to_" features that instead of being relative # to "initial_pathologic_diagnosis" are relative to "sample_procurement print " " print " STEP #3 " aKey = findProperKey (allClinDict, "days_to_sample_procurement") tKey = findProperKey (allClinDict, "days_to_initial_pathologic_diagnosis") if (aKey in allClinDict.keys()): haveA = 1 else: print " do not have [days_to_sample_procurement] in allClinDict " haveA = 0 if (tKey in allClinDict.keys()): haveT = 1 else: haveT = 0 print " do not have a [days_to_initial_pathologic_diagnosis] key " try: numClin = getNumPatients(allClinDict) for bKey in allClinDict.keys(): if (haveA == 0): continue if (bKey == aKey): continue if (bKey.find("days_to_") >= 0): ## make sure that this is not one of the relSS features just added !!! 
if ( bKey.find("relSS") >= 0 ): continue newKey = addTag2Key ( bKey, "relSP" ) print " --> making newKey <%s> from bKey <%s> [%d] " % (newKey, bKey, numClin) newVec = [0] * numClin numNotNA = 0 for kk in range(numClin): ## initialize to NA newVec[kk] = "NA" ## skip if an important value is NA if (str(allClinDict[aKey][kk]).upper() == "NA"): continue if (str(allClinDict[bKey][kk]).upper() == "NA"): continue if (haveT): if (str(allClinDict[tKey][kk]).upper() == "NA"): continue ## deltaDays is either (days_to_sample_procurement) - (days_to_initial_pathologic_diagnosis) ## or just (days_to_sample_procurement) if (haveT): deltaDays = allClinDict[aKey][kk] - allClinDict[tKey][kk] else: deltaDays = allClinDict[aKey][kk] ## and then we subtract 'delta days' from the original key to make the new relative key newVec[kk] = allClinDict[bKey][kk] - deltaDays print " STEP3a ", kk, allClinDict[zKey][kk], allClinDict[bKey][kk], allClinDict[aKey][kk], deltaDays, newVec[kk] numNotNA += 1 if ( numNotNA > 30 ): print " adding new key (%d) : " % numNotNA, newKey ## print newVec[:5] ## print newVec[-5:] allClinDict[newKey] = newVec else: print " NOT adding new key (%d) : ", numNotNA, newKey if (bKey.find("age_at_") >= 0): ## make sure that this is not one of the relSS features just added !!! if ( bKey.find("relSS") >= 0 ): continue ## make sure that this is not a "stage_at_" feature !!! if ( bKey.find("stage_at_") >= 0 ): continue newKey = addTag2Key ( bKey, "relSP" ) print " --> making newKey <%s> from bKey <%s> [%d] " % (newKey, bKey, numClin) newVec = [0] * numClin numNotNA = 0 for kk in range(numClin): ## initialize to NA newVec[kk] = "NA" ## skip if an important value is NA print " checking for important information ... ", aKey, bKey, tKey print allClinDict[aKey][kk] print allClinDict[bKey][kk] if (str(allClinDict[aKey][kk]).upper() == "NA"): continue if (str(allClinDict[bKey][kk]).upper() == "NA"): continue if (haveT): print allClinDict[tKey][kk] if (str(allClinDict[tKey][kk]).upper() == "NA"): continue <|fim▁hole|> deltaDays = allClinDict[aKey][kk] - allClinDict[tKey][kk] else: deltaDays = allClinDict[aKey][kk] print " computed deltaDays : ", deltaDays ## and then we subtract 'delta days', scaled to years ... ## 03mar14 : actually we need to ADD here ... newVec[kk] = allClinDict[bKey][kk] + ( float(deltaDays) / DAYS_PER_YEAR ) print " STEP3b ", kk, allClinDict[zKey][kk], allClinDict[bKey][kk], allClinDict[aKey][kk], deltaDays, newVec[kk] numNotNA += 1 if ( numNotNA > 30 ): print " adding new key (%d) : " % numNotNA, newKey ## print newVec[:5] ## print newVec[-5:] allClinDict[newKey] = newVec else: print " NOT adding new key (%d) : ", numNotNA, newKey except: print " --> failed in this try (y) " doNothing = 1 return (allClinDict) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# # fields of interest: # days_to_birth # days_to_initial_pathologic_diagnosis <-- this is always 0 # days_to_submitted_specimen_dx # days_to_last_followup # days_to_last_known_alive # days_to_death # also: # new_tumor_event_after_initial_treatment # days_to_new_tumor_event_after_initial_treatment def checkFollowupInfo(allClinDict): print " " print " in checkFollowupInfo ... 
" # FIRST: if there is a days_to_last_known_alive, then check that it is # used consistently, otherwise create it zKey = findProperKey (allClinDict, "bcr_patient_barcode") aKey = findProperKey (allClinDict, "days_to_last_known_alive") bKey = findProperKey (allClinDict, "days_to_last_followup") cKey = findProperKey (allClinDict, "days_to_death") dKey = findProperKey (allClinDict, "vital_status") haveA = (aKey in allClinDict.keys()) haveB = (bKey in allClinDict.keys()) haveC = (cKey in allClinDict.keys()) haveD = (dKey in allClinDict.keys()) print " have flags A, B, C and D : ", haveA, haveB, haveC, haveD if ( not haveD ): print " skipping this function ... requires vital_status " return (allClinDict) ## print " allClinDict.keys() : " ## print allClinDict.keys() numClin = getNumPatients(allClinDict) # range of days_to_last_known_alive is typically something like [0,3196] for kk in range(numClin): if (str(allClinDict[dKey][kk]).upper() == "DEAD"): if (str(allClinDict[cKey][kk]).upper() == "NA"): print " ERROR !!! need to know when this person died !!! ", allClinDict[zKey][kk] print kk print aKey, allClinDict[aKey][kk] print bKey, allClinDict[bKey][kk] print cKey, allClinDict[cKey][kk] print dKey, allClinDict[dKey][kk] print " UPDATING vital_status to Alive ... " print " " ## because we do not have a days_to_death value, we are going to call this person "Alive" allClinDict[dKey][kk] = "Alive" if (str(allClinDict[dKey][kk]).upper() == "ALIVE"): if (str(allClinDict[aKey][kk]).upper() == "NA"): if (str(allClinDict[bKey][kk]).upper() == "NA"): print " ERROR !!! no information about follow-up ??? ", allClinDict[zKey][kk] print kk print aKey, allClinDict[aKey][kk] print bKey, allClinDict[bKey][kk] print cKey, allClinDict[cKey][kk] print dKey, allClinDict[dKey][kk] print " UPDATING days_to_last_known_alive and days_to_last_followup to 0 " print " " allClinDict[aKey][kk] = 0 allClinDict[bKey][kk] = 0 else: print " ERROR in checkFollowupInfo ... how did we get here ??? " sys.exit(-1) return (allClinDict) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# # derive the preferred stage tumor stage from the comparison of the # reported one with the derived one def PreferredStage(reported, computed): t = testTumorStage(reported, computed) if (t == "AGREE"): return(reported) if (t == "Stage cannot be derived from TNM"): return(reported) if (t == "Derived stage is more specific"): return(repStage(computed)) # For SupTab1 use return(computed) if (t == "Stage can be derived from TNM"): return(repStage(computed)) # For SupTab1 use return(computed) if (t == "Stage more specific than TNM"): return(reported) if (t == "DISAGREE"): return(reported) # assuming the reported one to be valid! 
return("Error: Lack a preferred stage") # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# # Return first element of a vector, or if input is string, the string itself def repStage(substage): if (type(substage) is str): return(substage) if (type(substage) is list): return(substage[0]) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# # Characterize the difference between reported and inferred tumor stage def testTumorStage(reported, computed): # Agreement includes "in" relationship and NA equivalence if (type(computed) is list): if (reported in computed): return("AGREE") if (type(computed) is str): if (reported == computed): return("AGREE") if (((reported == "STAGE IVA") | (reported == "STAGE IVB")) & (computed == "STAGE IV")): return("Stage more specific than TNM") if ((reported == "NA") & (computed != "NA")): return("Stage can be derived from TNM") if ((reported != "NA") & (computed == "NA")): return("Stage cannot be derived from TNM") if (repStage(computed).startswith(reported)): return("Derived stage is more specific") return("DISAGREE") # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# # Derive Tumor Stage from TNM and AJCC table, 7th edition # sometimes if we get something like "M1" when it should actually be "M1A" # or "M1B", we will pick the first/lowest thing that it could be ... def getTumorStage(T, N, M): print " WARNING ... this function should NOT be called ... " sys.exit(-1) T = T.upper() N = N.upper() M = M.upper() if (M == "M1"): # Seems to be TCGA choice if IVA, IVB not specified return ("STAGE IV") if (M == "M1A"): return ("STAGE IVA") if (M == "M1B"): return ("STAGE IVB") if (T == "TX"): T = "NA" if (N == "NX"): N = "NA" if (M == "MX"): M = "NA" if (T == "NA" or N == "NA" or M == "NA"): return ("NA") if (T == "T0" and N == "N0" and M == "M0"): return ("STAGE 0") if (T == "Tis" and N == "N0" and M == "M0"): return ("STAGE 0") if (T == "T1" and N == "N0" and M == "M0"): return ("STAGE I") if (T == "T1A" and N == "N0" and M == "M0"): return ("STAGE I") if (T == "T1B" and N == "N0" and M == "M0"): return ("STAGE I") if (T == "T2" and N == "N0" and M == "M0"): return ("STAGE I") if (T == "T2A" and N == "N0" and M == "M0"): return ("STAGE I") if (T == "T2B" and N == "N0" and M == "M0"): return ("STAGE I") if (T == "T3" and N == "N0" and M == "M0"): return ("STAGE IIA") if (T == "T3A" and N == "N0" and M == "M0"): return ("STAGE IIA") if (T == "T3B" and N == "N0" and M == "M0"): return ("STAGE IIB") if (T == "T3C" and N == "N0" and M == "M0"): return ("STAGE IIB") if (T == "T4A" and N == "N0" and M == "M0"): return ("STAGE IIB") if (T == "T4B" and N == "N0" and M == "M0"): return ("STAGE IIC") if (T == "T4" and N == "N0" and M == "M0"): return (["STAGE IIB", "STAGE IIC"]) if (T == "T1" and N == "N1" and M == "M0"): return ("STAGE IIIA") if (T == "T1A" and N == "N1" and M == "M0"): return ("STAGE IIIA") if (T == "T1B" and N == "N1" and M == "M0"): return ("STAGE IIIA") if (T == "T1" and N == "N1A" and M == "M0"): return ("STAGE IIIA") if (T == "T1" and N == "N1B" and M == "M0"): return ("STAGE IIIA") if (T == "T1" and N == "N1C" and M == "M0"): return ("STAGE IIIA") if (T == "T1" and N == "N2" and M == "M0"): return (["STAGE IIIA", "STAGE IIIB"]) # CHOICE IIIA, IIIB if (T == "T1B" and N == "N2" and M == "M0"): return ("STAGE IIIB") if (T == "T1" and N == "N2A" and M == "M0"): return ("STAGE IIIA") if (T == "T1" and N == "N3" and M == "M0"): return ("STAGE IIIA") if (T == "T2" and N == 
"N1" and M == "M0"): return ("STAGE IIIA") if (T == "T2A" and N == "N1" and M == "M0"): return ("STAGE IIIA") if (T == "T2B" and N == "N1" and M == "M0"): return ("STAGE IIIA") if (T == "T2" and N == "N1A" and M == "M0"): return ("STAGE IIIA") if (T == "T2" and N == "N1B" and M == "M0"): return ("STAGE IIIA") if (T == "T2" and N == "N1C" and M == "M0"): return ("STAGE IIIA") if (T == "T3" and N == "N1" and M == "M0"): return ("STAGE IIIB") if (T == "T3A" and N == "N1" and M == "M0"): return ("STAGE IIIB") if (T == "T3B" and N == "N1" and M == "M0"): return ("STAGE IIIB") if (T == "T3" and N == "N1A" and M == "M0"): return ("STAGE IIIB") if (T == "T3" and N == "N1B" and M == "M0"): return ("STAGE IIIB") if (T == "T3" and N == "N1C" and M == "M0"): return ("STAGE IIIB") if (T == "T4A" and N == "N1" and M == "M0"): return ("STAGE IIIB") if (T == "T4A" and N == "N1A" and M == "M0"): return ("STAGE IIIB") if (T == "T4A" and N == "N1B" and M == "M0"): return ("STAGE IIIB") if (T == "T4A" and N == "N1C" and M == "M0"): return ("STAGE IIIB") if (T == "T4" and N == "N1" and M == "M0"): return (["STAGE IIIB", "STAGE IIIC"]) if (T == "T2" and N == "N2" and M == "M0"): return ("STAGE IIIB") if (T == "T2A" and N == "N2" and M == "M0"): return ("STAGE IIIB") if (T == "T2B" and N == "N2" and M == "M0"): return ("STAGE IIIB") if (T == "T2" and N == "N2A" and M == "M0"): return ("STAGE IIIB") if (T == "T2" and N == "N2B" and M == "M0"): return ("STAGE IIIB") if (T == "T2" and N == "N2C" and M == "M0"): return ("STAGE IIIC") if (T == "T2" and N == "N3" and M == "M0"): return ("STAGE IIIC") if (T == "T2B" and N == "N3" and M == "M0"): return ("STAGE IIIC") if (T == "T3" and N == "N2" and M == "M0"): return (["STAGE IIIB", "STAGE IIIC"]) # CHOICE IIIB, IIIC if (T == "T3" and N == "N2A" and M == "M0"): return ("STAGE IIIB") if (T == "T3" and N == "N2C" and M == "M0"): return ("STAGE IIIC") if (T == "T1" and N == "N2B" and M == "M0"): return ("STAGE IIIB") if (T == "T3" and N == "N2B" and M == "M0"): return ("STAGE IIIC") if (T == "T3" and N == "N3" and M == "M0"): return ("STAGE IIIC") if (T == "T4" and N == "N2" and M == "M0"): return ("STAGE IIIC") if (T == "T4A" and N == "N2" and M == "M0"): return ("STAGE IIIC") if (T == "T4A" and N == "N2A" and M == "M0"): return ("STAGE IIIC") if (T == "T4A" and N == "N2B" and M == "M0"): return ("STAGE IIIC") if (T == "T4A" and N == "N2C" and M == "M0"): return ("STAGE IIIC") if (T == "T4" and N == "N3" and M == "M0"): return ("STAGE IIIC") if (T == "T4" and N == "N3A" and M == "M0"): return ("STAGE IIIC") if (T == "T4A" and N == "N3" and M == "M0"): return ("STAGE IIIC") if (T == "T4B" and N == "N1" and M == "M0"): return ("STAGE IIIC") if (T == "T4B" and N == "N2" and M == "M0"): return ("STAGE IIIC") if (T == "T4B" and N == "N1A" and M == "M0"): return ("STAGE IIIC") if (T == "T4B" and N == "N1B" and M == "M0"): return ("STAGE IIIC") if (T == "T4B" and N == "N1C" and M == "M0"): return ("STAGE IIIC") if (T == "T4B" and N == "N2C" and M == "M0"): return ("STAGE IIIC") if (T == "T4B" and N == "N2A" and M == "M0"): return ("STAGE IIIC") if (T == "T4B" and N == "N2B" and M == "M0"): return ("STAGE IIIC") if (T == "T4B" and N == "N2B" and M == "M0"): return ("STAGE IIIC") if (T == "T4B" and N == "N3A" and M == "M0"): return ("STAGE IIIC") # We reach this point if all values are non-NA, but combination is not in # AJCC tumor table print " ERROR in getTumorStage ??? 
", T, N, M return ("Not in AJCC Table?") sys.exit(-1) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def checkTumorStage(allClinDict): if ("tumor_stage" not in allClinDict.keys()): print " skipping checkTumorStage ... " return (allClinDict) else: print " running checkTumorStage ... " numClin = getNumPatients(allClinDict) print " total number of patients : ", numClin if (0): stringList = ["tumor", "stage", "spread"] for aKey in allClinDict.keys(): for aString in stringList: if (aKey.find(aString) >= 0): print aKey reqKeyList = [ "bcr_patient_barcode", "tumor_stage", "primary_tumor_pathologic_spread", "lymphnode_pathologic_spread", "distant_metastasis_pathologic_spread"] numNotFound = 0 for aKey in reqKeyList: if (aKey not in allClinDict.keys()): numNotFound += 1 if (numNotFound > 0): print " skipping checkTumorStage ... " return (allClinDict) pKey = getProperKey ( allClinDict, "bcr_patient_barcode" ) sKey = getProperKey ( allClinDict, "tumor_stage" ) tKey = getProperKey ( allClinDict, "primary_tumor_pathologic_spread" ) nKey = getProperKey ( allClinDict, "lymphnode_pathologic_spread" ) mKey = getProperKey ( allClinDict, "distant_metastasis_pathologic_spread" ) for ii in range(numClin): aCode = allClinDict[pKey][ii] curTumorStage = allClinDict[sKey][ii] curT = allClinDict[tKey][ii] curN = allClinDict[nKey][ii] curM = allClinDict[mKey][ii] # print " checking tumor stage for <%s> <%s> <%s> <%s> <%s> " % ( # aCode, curTumorStage, curN, curM, curT ) ## removing this 15aug2014 ... if ( 0 ): curTumorStage = curTumorStage.upper() curTumorStage = curTumorStage.strip() if (curTumorStage != "NA"): if (not curTumorStage.startswith("STAGE ")): curTumorStage = "STAGE " + curTumorStage # as of 09nov12, NOT attempting to derive tumor stage from T, N, and M if (0): # get AJCC-derived tumor stage, compare to DCC value, and decide # which to use ajccStage = getTumorStage(curT, curN, curM) newStage = PreferredStage(curTumorStage, ajccStage) allClinDict[sKey][ii] = newStage # report if (type(ajccStage) is list): ajccString = ' OR '.join(ajccStage) else: ajccString = ajccStage print aCode.upper() + ', TNM:' + curT.upper() + ' ' + curN.upper() + ' ' + curM.upper() + ', DCC Stage:' \ + curTumorStage + ', AJCC Stage:' + ajccString + ', Comparison:' \ + \ testTumorStage(curTumorStage, ajccStage) + \ ', Will use: ' + newStage return (allClinDict) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def checkVitalStatus(allClinDict): vsKey = findProperKey ( allClinDict, "vital_status" ) bcKey = findProperKey ( allClinDict, "bcr_patient_barcode" ) if ( vsKey == "NO KEY" ): print " skipping checkVitalStatus ... " return (allClinDict) print " running checkVitalStatus ... " numClin = getNumPatients(allClinDict) print " total number of patients : ", numClin numLC = 0 numDC = 0 for ii in range(numClin): aCode = allClinDict[bcKey][ii] curStatus = allClinDict[vsKey][ii] doChange = 1 try: newStatus = curStatus except: try: if (curStatus == 0): newStatus = "Alive" numLC += 1 elif (curStatus == 1): newStatus = "Dead" numDC += 1 except: doChange = 0 if (doChange): allClinDict[vsKey][ii] = newStatus if (numLC + numDC > 0): print " WARNING: changed some vital status fields ... 
%d %d " % (numLC, numDC) return (allClinDict) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def updateAge(allClinDict): pKey = findProperKey ( allClinDict, "bcr_patient_barcode" ) aKey = findProperKey ( allClinDict, "age_at_initial_pathologic_diagnosis" ) bKey = findProperKey ( allClinDict, "days_to_birth" ) print " running updateAge ... " numClin = getNumPatients(allClinDict) print " total number of patients : ", numClin for ii in range(numClin): try: aCode = allClinDict[pKey][ii] curAge = allClinDict[aKey][ii] curD2B = allClinDict[bKey][ii] newAge = float(0 - int(curD2B)) / DAYS_PER_YEAR # now we want to limit the 'precision' to two decimal places newAge = float(int((100. * newAge) + 0.49)) / 100. if (abs(curAge - int(newAge)) > 0): print " ERROR in updateAge ??? ", curAge, curD2B, newAge, aCode allClinDict[aKey][ii] = newAge except: doNothing = 1 return (allClinDict) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def removeParens(oneKey): print " in removeParens ... " origUniqList = [] newUniqList = [] newKey = [] for aLabel in oneKey: if (aLabel not in origUniqList): origUniqList += [aLabel] if (aLabel.find("(") >= 0): print " --> found open paren ... at %d in <%s> " % (aLabel.find("("), aLabel) bLabel = "" copyOn = 1 for ii in range(len(aLabel)): if (aLabel[ii] == "("): copyOn = 0 if (copyOn): bLabel += aLabel[ii] if (aLabel[ii] == ")"): copyOn = 1 if (bLabel.startswith("_")): bLabel = bLabel[1:] if (bLabel.endswith("_")): bLabel = bLabel[:-1] newKey += [bLabel] if (bLabel not in newUniqList): newUniqList += [bLabel] else: newKey += [aLabel] if (aLabel not in newUniqList): newUniqList += [aLabel] print origUniqList print newUniqList if (len(newUniqList) == len(origUniqList)): print " --> removing parenthetical strings " print origUniqList print newUniqList return (newKey) else: print " NOT removing parenthetical strings " return (oneKey) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def removeSpecialChars(oneKey): okExceptions = ['4+'] # ran into a strange case here where most of the elements were strings # like 'grade_1' but some were just integers like 3 :( # so ... these next few blocks of code are TOTAL HACKS ... numInt = 0 numNot = 0 aString = '' aInt = 9999 for ii in range(len(oneKey)): aLabel = str(oneKey[ii]) if (aLabel.upper() == "NA"): continue if (aLabel.upper() == "UNKNOWN"): oneKey[ii] = "NA" continue try: iVal = int(aLabel) numInt += 1 aInt = iVal except: numNot += 1 aString = aLabel print " number of integers = %d number NOT = %d " % (numInt, numNot) if (numInt > 0 and numNot > 0): # for now, we are just checking for 'grade' strings that are sometimes # 'grade_3' and sometimes just '3' if (aString.lower().startswith("grade_")): for ii in range(len(oneKey)): aLabel = str(oneKey[ii]) if (aLabel.upper() == "NA"): continue if (not aLabel.lower().startswith("grade_")): try: iVal = int(aLabel) aString = "Grade_%d" % iVal oneKey[ii] = aString except: print " FAILED to prepend grade ??? ", aLabel sys.exit(-1) # or if there are at least twice as many strings as integers, then we # will cast the integers to strings ... elif (numInt < (numNot / 2)): for ii in range(len(oneKey)): aLabel = str(oneKey[ii]) if (aLabel.upper() == "NA"): continue try: iVal = int(aLabel) oneKey[ii] = str(iVal) except: doNothing = 1 elif (aString not in okExceptions): if ( 1 ): print " WARNING ... something odd about this feature ... 
", aInt, aString print oneKey ## return ([]) ## sys.exit(-1) origUniqList = [] newUniqList = [] newKey = [] for aLabel in oneKey: if (aLabel not in origUniqList): origUniqList += [aLabel] bLabel = "" try: for ii in range(len(aLabel)): if (aLabel[ii] == ' '): bLabel += "_" elif (aLabel[ii] == "'"): bLabel += "_" elif (aLabel[ii] == '"'): bLabel += "_" elif (aLabel[ii] == ':'): bLabel += "_" elif (aLabel[ii] == '/'): bLabel += "_" # elif ( aLabel[ii] == '-' ): ## bLabel += "_" elif (aLabel[ii] == '.'): bLabel += "_" elif (aLabel[ii] == ','): bLabel += "_" else: bLabel += aLabel[ii] except: print " ERROR in removeSpecialChars ??? " print " oneKey = <%s> " % (oneKey) print " aLabel = <%s> " % (aLabel) sys.exit(-1) ii = bLabel.find("__") while (ii >= 0): bLabel = bLabel[:ii] + bLabel[ii + 1:] ii = bLabel.find("__") newKey += [bLabel] if (bLabel not in newUniqList): newUniqList += [bLabel] print origUniqList print newUniqList if (len(newUniqList) == len(origUniqList)): return (newKey) else: print " NOT removing parenthetical strings " return (oneKey) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def getCommonPrefix(aLabel, bLabel): nn = 0 while (aLabel[nn].lower() == bLabel[nn].lower()): nn += 1 if (nn >= len(aLabel)): return (aLabel[:nn]) if (nn >= len(bLabel)): return (aLabel[:nn]) return (aLabel[:nn]) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def getCommonSuffix(aLabel, bLabel): nn = -1 while (aLabel[nn].lower() == bLabel[nn].lower()): nn -= 1 if (-nn > len(aLabel)): return (aLabel) if (-nn > len(bLabel)): return (bLabel) if (nn == -1): return ("") else: return (aLabel[nn + 1:]) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def removeCommonPrefix(oneKey, labelList): print " in removeCommonPrefix : ", labelList madeChange = 0 nLabel = len(labelList) for ii in range(nLabel): for jj in range(ii + 1, nLabel): commonPrefix = getCommonPrefix(labelList[ii], labelList[jj]) # if the commonPrefix is *ever* the entire string, then # we cannot really use this ... if (commonPrefix == labelList[ii]): continue if (commonPrefix == labelList[jj]): continue if (len(commonPrefix) > 4): print ii, jj, commonPrefix newKey = [] for cLabel in oneKey: if (cLabel.lower().startswith(commonPrefix)): dLabel = cLabel[len(commonPrefix):] if (len(dLabel) < 4): dLabel = cLabel else: madeChange += 1 else: dLabel = cLabel if (dLabel[0] == '_'): dLabel = dLabel[1:] newKey += [dLabel] newList = [] for cLabel in labelList: if (cLabel.lower().startswith(commonPrefix)): dLabel = cLabel[len(commonPrefix):] if (len(dLabel) < 4): dLabel = cLabel else: madeChange += 1 else: dLabel = cLabel if (dLabel[0] == '_'): dLabel = dLabel[1:] newList += [dLabel] if (len(labelList) == len(newList)): labelList = newList oneKey = newKey if (madeChange > 0): print " after removeCommonPrefix : ", madeChange, labelList return (oneKey, labelList) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def removeCommonSuffix(oneKey, labelList): print " in removeCommonSuffix : ", labelList madeChange = 0 nLabel = len(labelList) for ii in range(nLabel): for jj in range(ii + 1, nLabel): commonSuffix = getCommonSuffix(labelList[ii], labelList[jj]) # if the commonSuffix is *ever* the entire string, then # we cannot really use this ... 
if (commonSuffix == labelList[ii]): continue if (commonSuffix == labelList[jj]): continue if (len(commonSuffix) > 4): print ii, jj, commonSuffix newKey = [] for cLabel in oneKey: if (cLabel.lower().endswith(commonSuffix)): dLabel = cLabel[:-len(commonSuffix)] if (len(dLabel) < 4): dLabel = cLabel else: madeChange += 1 else: dLabel = cLabel if (dLabel[-1] == '_'): dLabel = dLabel[:-1] newKey += [dLabel] newList = [] for cLabel in labelList: if (cLabel.lower().endswith(commonSuffix)): dLabel = cLabel[:-len(commonSuffix)] if (len(dLabel) < 4): dLabel = cLabel else: madeChange += 1 else: dLabel = cLabel if (dLabel[-1] == '_'): dLabel = dLabel[:-1] newList += [dLabel] if (len(labelList) == len(newList)): labelList = newList oneKey = newKey if (0): print " removeCommonSuffix has not yet been fully tested ... " print labelList print oneKey sys.exit(-1) if (madeChange > 0): print " after removeCommonSuffix : ", madeChange, labelList return (oneKey, labelList) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def abbrevCategStrings(allClinDict): print " " print " in abbrevCategStrings ... " print " " keyList = allClinDict.keys() keyList.sort() for aKey in keyList: if (aKey.find("bcr_patient_barcode") >= 0): print " all barcodes : " print allClinDict[aKey] print " done " continue (keyType, nCount, nNA, nCard, labelList, labelCount) = miscClin.lookAtKey(allClinDict[aKey]) print aKey, keyType, nCount, nNA if (keyType == "NOMINAL"): # remove weird characters from the strings ... print " calling removeSpecialChars ... <%s> " % (aKey) allClinDict[aKey] = removeSpecialChars(allClinDict[aKey]) # if we get nothing back, then skip ... if (allClinDict[aKey] == []): print " WARNING ... got nothing back ??? ", aKey continue # otherwise, look at cardinality, type, etc ... (keyType, nCount, nNA, nCard, labelList, labelCount) = miscClin.lookAtKey(allClinDict[aKey]) maxLen = 0 skipFlag = 0 for aLabel in labelList: try: maxLen = max(maxLen, len(aLabel)) except: print " what is up with this key ??? ", aKey, labelList skipFlag = 1 if (skipFlag): continue if (maxLen > 10): ## print aKey, labelList, maxLen # first try at making the labels a bit shorter by removing # parenthetical elements ... allClinDict[aKey] = removeParens(allClinDict[aKey]) (keyType, nCount, nNA, nCard, labelList, labelCount) = miscClin.lookAtKey(allClinDict[aKey]) maxLen = 0 for aLabel in labelList: maxLen = max(maxLen, len(aLabel)) ## print aKey, labelList, maxLen # removing this step for now (04dec12) if (0): # next try to remove common prefixes or suffixes ... if (maxLen > 10): (allClinDict[aKey], labelList) = removeCommonPrefix( allClinDict[aKey], labelList) (allClinDict[aKey], labelList) = removeCommonSuffix( allClinDict[aKey], labelList) maxLen = 0 for aLabel in labelList: maxLen = max(maxLen, len(aLabel)) ## print aKey, labelList, maxLen if (maxLen > 25): print " --> strings are still rather long, but not sure what to do about this ... " print labelList, maxLen print " " print " " return (allClinDict) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def checkPrefix(labelList, aPrefix): nLabel = len(labelList) nHas = 0 for aLabel in labelList: bLabel = aLabel.upper() if (bLabel == "NA"): nLabel -= 1 continue if (bLabel.startswith(aPrefix)): nHas += 1 if ((nHas + 2) >= nLabel): return (1) return (0) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def translateArabic(aLabel, usePrefix): # print " in translateArabic ... 
" pLen = len(usePrefix) bLabel = aLabel.upper() if (bLabel.startswith(usePrefix)): bLabel = bLabel[pLen:] if (bLabel[0] == "_"): bLabel = bLabel[1:] bLen = len(bLabel) found = 0 for iLen in range(bLen, 0, -1): # print found, iLen, bLabel[:iLen] if (not found): try: curN = int(bLabel[:iLen]) found = 1 except: doNothing = 1 if (not found): # X means that it could not be assessed, so returning NA if (bLabel == "X"): return ("NA") # B means 'borderline' but returning NA ... elif (bLabel == "B"): return ("NA") else: print " ERROR ??? <%s> <%s> --> returning NA " % (bLabel, usePrefix) return ("NA") rLen = len(str(curN)) if (len(bLabel) > rLen): bLabel = bLabel[rLen:] if (bLabel == "A"): curN += 0.2 elif (bLabel == "B"): curN += 0.4 elif (bLabel == "C"): curN += 0.6 else: print " left over in translateArabic <%s> <%s> <%s> " % (bLabel, aLabel, usePrefix) return (curN) else: return ("NA") # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def translateRoman(aLabel, usePrefix): romanList = ["VIII", "III", "VII", "IV", "IX", "II", "VI", "I", "V", "X"] numbrList = [8, 3, 7, 4, 9, 2, 6, 1, 5, 10] pLen = len(usePrefix) bLabel = aLabel.upper() if (bLabel.startswith(usePrefix)): bLabel = bLabel[pLen:] if (bLabel[0] == "_"): bLabel = bLabel[1:] found = 0 for kk in range(len(romanList)): if (not found): if (bLabel.startswith(romanList[kk])): found = 1 curKK = kk curN = numbrList[kk] curR = romanList[kk] if (not found): if (bLabel == "X"): return ("NA") elif (bLabel == "TIS"): return ("NA") else: print " ERROR ??? ", bLabel, usePrefix sys.exit(-1) rLen = len(curR) if (len(bLabel) > rLen): bLabel = bLabel[rLen:] if (bLabel == "A"): curN += 0.2 elif (bLabel == "B"): curN += 0.4 elif (bLabel == "C"): curN += 0.6 else: print " left over in translateRoman <%s> <%s> <%s> " % (bLabel, aLabel, usePrefix) return (curN) else: return ("NA") # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def checkRomanNumerals(labelList, usePrefix): skipList = ["_"] stripList = ["A", "B", "C", "X", "0"] romanList = ["I", "V", "X"] pLen = len(usePrefix) yesR = 0 notR = 0 for aLabel in labelList: bLabel = aLabel.upper() if (bLabel.startswith(usePrefix)): bLabel = bLabel[pLen:] if (bLabel[-1] in stripList): bLabel = bLabel[:-1] for ii in range(len(bLabel)): if (bLabel[ii] in romanList): yesR += 1 else: if (bLabel[ii] not in skipList): notR += 1 # print " in checkRomanNumerals : ", yesR, notR if (notR == 0): return (1) if (notR > yesR): return (0) if (yesR > 0): print " ??? strange counts in checkRomanNumerals ??? " return (1) return (0) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# # this function tries to create a numerical feature for a categorical # feature ... def addNumericalFeatures(allClinDict): print " " print " " print " in addNumericalFeatures ... " keyList = allClinDict.keys() keyList.sort() # CAREFUL: add on things only at the end of this list ... prefixList = ["T", "N", "M", "STAGE", "GRADE", "G", "PT"] nPrefix = len(prefixList) prefixBits = [0] * nPrefix for aKey in keyList: if (aKey == "bcr_patient_barcode"): continue (keyType, nCount, nNA, nCard, labelList, labelCount) = miscClin.lookAtKey(allClinDict[aKey]) if (keyType == "NOMINAL"): if (nCard > 2 and nCard < 15): tmpKey = aKey.lower() if (tmpKey.find("stage") >= 0 or tmpKey.find("grade") >= 0 or tmpKey.find("pathologic_spread") >= 0): print " considering this categorical feature ... 
", aKey, keyType, nCard, labelList, labelCount for iP in range(nPrefix): aPrefix = prefixList[iP] prefixBits[iP] = checkPrefix(labelList, aPrefix) # if the 'GRADE' bit gets set, then unset the 'G' bit if (prefixBits[4]): prefixBits[5] = 0 # print prefixBits usePrefix = "" if (sum(prefixBits) == 1): for iP in range(nPrefix): if (prefixBits[iP]): usePrefix = prefixList[iP] elif (sum(prefixBits) > 1): print " ERROR ??? how can it have multiple prefix bits ON ??? " sys.exit(-1) # print " usePrefix <%s> " % usePrefix isRoman = checkRomanNumerals(labelList, usePrefix) # print " isRoman = %d " % isRoman if (aKey[1] == ":"): tokenList = aKey.split(':') newLabel = "N:" + \ tokenList[1] + ":" + tokenList[2] + "_derived" for ii in range(3, len(tokenList)): newLabel += ":" + tokenList[ii] else: newLabel = "N:CLIN:" + aKey + "_derived" if (newLabel in allClinDict.keys()): print " this feature label already exists ??? ", newLabel sys.exit(-1) curV = allClinDict[aKey] numClin = len(curV) tmpV = [0] * numClin for kk in range(numClin): if (curV[kk] == "NA"): tmpV[kk] = "NA" elif (isRoman): tmpV[kk] = translateRoman(curV[kk], usePrefix) else: tmpV[kk] = translateArabic(curV[kk], usePrefix) if (0): if (tmpV[kk] == 0): print " why is tmpV[kk] still ZERO ??? ", kk, numClin, curV[kk], usePrefix, tmpV[kk] numNA = 0 notNA = 0 for kk in range(numClin): if (tmpV[kk] == "NA"): numNA += 1 else: notNA += 1 if (numNA > 10 * notNA): print " --> NOT adding this new feature <%s> " % newLabel, list(set(tmpV)), numNA, notNA, usePrefix, isRoman else: print " --> ADDING new feature !!! <%s> " % newLabel, list(set(tmpV)), numNA, notNA, usePrefix, isRoman allClinDict[newLabel] = tmpV print " " print " " print " " return (allClinDict) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def getMappingDict(featName, auxName): mapDict = {} if (featName[1] == ':'): if (featName[6] == ':'): tokenList = featName.split(':') tmpFeatName = tokenList[2] else: tmpFeatName = featName try: mapFilename = "../" + auxName + "/" + tmpFeatName + ".map" fh = file(mapFilename) firstLine = 1 for aLine in fh: aLine = aLine.strip() ## aLine = aLine.upper() tokenList = aLine.split('\t') if (firstLine): if (tokenList[0].upper() == tmpFeatName.upper()): numNew = len(tokenList) - 1 newNames = tokenList[1:] print newNames firstLine = 0 else: print " ERROR ??? invalid mapping file ??? " print mapFilename print tokenList print tmpFeatName print " FAILING out of TRY " sys.exit(-1) else: mapDict[str(tokenList[0])] = tokenList[1:] fh.close() print " mapping dictionary read from <%s> : " % mapFilename print mapDict print " " return (mapDict, newNames) except: return (mapDict, []) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def getMapping ( mapDict, curV, ii ): for k in mapDict.keys(): if ( k.lower() == curV.lower() ): return ( mapDict[k][ii] ) print " FAILED TO GET MAPPING ??? ", curV, ii print mapDict sys.exit(-1) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def addDerivedFeatures(allClinDict, auxName): print " " print " " print " in addDerivedFeatures ... 
" keyList = allClinDict.keys() keyList.sort() for aKey in keyList: if (aKey == "bcr_patient_barcode"): continue (keyType, nCount, nNA, nCard, labelList, labelCount) = miscClin.lookAtKey(allClinDict[aKey]) print " considering key <%s> " % (aKey), keyType, nCard if (keyType == "NOMINAL"): if (nCard > 2 and nCard < 25): tmpKey = aKey.lower() tmpList = [] for aLabel in labelList: tmpL = str(aLabel) if (tmpL not in tmpList): tmpList += [tmpL] labelList = tmpList if (1): print " considering this categorical feature ... ", aKey, keyType, nCard, labelList, labelCount (mapDict, newNames) = getMappingDict(aKey, auxName) # if there is no mapping file, then we won't be making any # new features ... if (len(newNames) == 0): continue # sanity check ... if (0): for bKey in mapDict.keys(): if ( stringInList_CaseInsens ( bKey, labelList ) ): print " ERROR ??? mapping does not match this feature ??? " print mapDict print labelList sys.exit(-1) if (1): for bLabel in labelList: try: if ( not stringInList_CaseInsens ( bLabel, mapDict.keys() ) ): print " ************************************************** " print " ERROR ??? feature value not in mapDict ??? ", bLabel print " labelList : ", labelList print " mapDict : ", mapDict print " --> WILL NOT ADD ANY DERIVED FEATURES AT THIS TIME " print " ************************************************** " continue # sys.exit(-1) except: doNothing = 1 # if there is no mapping file, then we won't be making any # new features ... if (len(newNames) == 0): continue # but if we do have one or more mappings, then we need # to create those features ... for ithName in range(len(newNames)): aName = newNames[ithName] print " looping over %d mappings ... " % len(newNames), ithName, aName # the first thing we need to figure out is whether this is another # categorical feature, or a numerical one ... isNum = 1 uVec = [] for bKey in mapDict.keys(): curVal = mapDict[bKey][ithName] if (curVal == "NA"): continue if ( stringInList_CaseInsens ( curVal, uVec ) ): uVec += [curVal] try: fVal = float(curVal) except: isNum = 0 print " is numerical ??? ", isNum if (len(uVec) == 1): print " mapping to a constant ??? " sys.exit(-1) elif (len(uVec) == 2): print " mapping is binary " # if the mapping produces a binary feature, then # over-ride the numerical feature if (isNum): print " over-riding the fact that the features LOOKS numerical ... " isNum = 0 if (aName[1] == ":"): if (aName[0] == "N"): if (not isNum): print " ERROR ??? new feature does not look to be numerical ???? " print aName, uVec sys.exit(-1) # start setting up the new feature ... newLabel = aName if (newLabel in allClinDict.keys()): print " this feature label already exists ??? ", newLabel sys.exit(-1) curV = allClinDict[aKey] numClin = len(curV) tmpV = [0] * numClin for kk in range(numClin): if (curV[kk].upper() == "NA"): tmpV[kk] = "NA" else: try: tmpV[kk] = getMapping ( mapDict, curV[kk], ithName ) ## tmpV[kk] = mapDict[curV[kk]][ithName] except: print " ERROR ??? failed to map ??? setting to NA but MUST FIX !!! " print kk, curV[kk], ithName print mapDict if (1): tmpV[kk] = "NA" else: sys.exit(-1) numNA = 0 notNA = 0 for kk in range(numClin): if (tmpV[kk] == "NA"): numNA += 1 else: notNA += 1 if (numNA > 10 * notNA): print " --> NOT adding this new feature <%s> " % newLabel, list(set(tmpV)), numNA, notNA else: print " --> ADDING new feature !!! <%s> " % newLabel, list(set(tmpV)), numNA, notNA allClinDict[newLabel] = tmpV print " " print " " print " " print " RETURNING from addDerivedFeatures ... 
" return (allClinDict) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# # this function creates N binary indicator vectors based on a single nominal # (categorical) variable of cardinality N -- the indicator vectors still # contain strings ("I0" and "I1") so that we can tell that they are still not # truly "numeric" vectors ... def addIndicatorFeatures(allClinDict): magicStrings = ["patient", "person", "vital", "surviv", "race", "ethnic", "prior", "gender", "age_at", "ageat", "radiation", "chemo", "therapy", "treat", "performance", "days_to_", "daysto", "year_of", "yearof", "surgical", "recurrence", "pregnancies"] keyList = allClinDict.keys() keyList.sort() for aKey in keyList: if (aKey == "bcr_patient_barcode"): continue (keyType, nCount, nNA, nCard, labelList, labelCount) = miscClin.lookAtKey(allClinDict[aKey]) if (keyType == "NOMINAL"): if (nCard > 2 and nCard < 27): print " " print " " print " in addIndicatorFeatures ... ", aKey, keyType, nCard, labelList, labelCount for aLabel in labelList: # sometimes even though we have a "categorical" feature, some of the # categories appear to be integers or floating point values # ... if (type(aLabel) is float): print " we seem to have a floating point value ??? ", aLabel iVal = int(aLabel + 0.001) xVal = float(aLabel) - iVal print iVal, xVal if (abs(xVal) < 0.001): aLabel = "%d" % iVal else: aLabel = str(aLabel) elif (type(aLabel) is int): iVal = int(aLabel) aLabel = "%d" % iVal print " " ## print aKey, aLabel try: # 012345678901234567890123456789... # C:CLIN:<label> # C:CLIN:<label>:a:b:c:d:e if (aKey[1] == ":" and aKey[6] == ":"): # if this feature name already has a prefix (eg # "C:CLIN:") featType = aKey[2:7] i1 = aKey[7:].find(':') if (i1 < 0): # if there are no further ':' firstName = aKey[7:] secondName = ":::::" else: # if there are ... firstName = aKey[7:7 + i1] secondName = aKey[7 + i1 + 1:] print " (a) got to here ... ", featType, aLabel, firstName, secondName newLabel = "B:" + featType + \ "I(" + aLabel + "|" + firstName + ")" + \ secondName print " (b) got to here ... ", newLabel if (newLabel.find("|):") > 0): print " (a) BAILING !!!! ", newLabel sys.exit(-1) else: # here we really need to have some way to guess whether this # should be a CLIN or a SAMP feature ... typeString = "UNK" for aString in magicStrings: if (aKey.find(aString) >= 0): typeString = "CLIN" if (typeString == "UNK"): print " defaulting to type SAMP for this feature : <%s> " % (aKey) typeString = "SAMP" print " (c) got to here ... ", typeString, aLabel newLabel = "B:" + typeString + ":" + \ "I(" + aLabel + "|" + aKey + ")" except: print " (b) BAILING !!! " print " ERROR in addIndicatorFeatures ??? ", aLabel, aKey sys.exit(-1) # make sure there are no blanks ... newLabel = tsvIO.replaceBlanks(newLabel, "_") if (newLabel in allClinDict.keys()): print " this indicator variable already exists so I will not make a new one ... ", newLabel continue curV = allClinDict[aKey] numClin = len(curV) tmpV = [0] * numClin print " ... looping over %d values ... 
default new value is zero " % (numClin) for kk in range(numClin): print kk, allClinDict[aKey][kk], aLabel, type(allClinDict[aKey][kk]), type(aLabel) if (allClinDict[aKey][kk] == "NA"): tmpV[kk] = "NA" elif (str(allClinDict[aKey][kk]).lower() == str(aLabel).lower()): tmpV[kk] = 1 print " adding new feature : ", newLabel allClinDict[newLabel] = tmpV return (allClinDict) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# def addPairwiseIndicatorFeatures(allClinDict): magicStrings = ["patient", "person", "vital", "surviv", "race", "ethnic", "prior", "gender", "age_at", "ageat", "radiation", "chemo", "therapy", "treat", "performance", "days_to_", "daysto", "year_of", "yearof", "surgical", "recurrence", "pregnancies"] print " " print " " keyList = allClinDict.keys() keyList.sort() for aKey in keyList: if (aKey == "bcr_patient_barcode"): continue (keyType, nCount, nNA, nCard, labelList, labelCount) = miscClin.lookAtKey(allClinDict[aKey]) if (keyType == "NOMINAL"): # we do this only for categorical features with 3-9 categories if (nCard > 2 and nCard < 10): print " " print " in addPairwiseIndicatorFeatures ... ", aKey, keyType, nCard, labelList, labelCount for ak in range(len(labelList)): aLabel = labelList[ak] for bk in range(ak + 1, len(labelList)): bLabel = labelList[bk] print " aLabel=<%s> bLabel=<%s> " % (aLabel, bLabel) try: if (aKey[1] == ":" and aKey[6] == ":"): i1 = aKey[7:].find(':') if (i1 < 0): i1 = len(aKey) i2 = len(aKey) else: i1 = i1 + 7 i2 = aKey[(i1 + 1):].find(':') if (i2 < 0): i2 = len(aKey) if (i2 > 0 and i2 < len(aKey)): newLabel = "B:" + \ aKey[ 2:7] + "I(" + aLabel + "," + bLabel + "|" + aKey[7:i1] + ")" + aKey[i2:] else: newLabel = "B:" + \ aKey[ 2:7] + "I(" + aLabel + "," + bLabel + "|" + aKey[7:i1] + ")" + "::::" else: # here we really need to have some way to guess whether this # should be a CLIN or a SAMP feature ... typeString = "UNK" for aString in magicStrings: if (aKey.find(aString) >= 0): typeString = "CLIN" if (typeString == "UNK"): print " defaulting to type SAMP for this feature : <%s> " % (aKey) typeString = "SAMP" newLabel = "B:" + typeString + ":" + \ "I(" + aLabel + "," + bLabel + "|" + aKey + ")" + \ "::::" except: print " NOT continuing in addPairwiseIndicatorFeatures !!! ", aLabel, bLabel, aKey continue # make sure there are no blanks ... newLabel = tsvIO.replaceBlanks(newLabel, "_") print " --> new label: <%s> " % newLabel if (newLabel in allClinDict.keys()): print " this indicator variable already exists so I will not make a new one ... ", newLabel continue curV = allClinDict[aKey] numClin = len(curV) tmpV = ["NA"] * numClin for kk in range(numClin): if (allClinDict[aKey][kk].lower() == aLabel.lower()): tmpV[kk] = 1 elif (allClinDict[aKey][kk].lower() == bLabel.lower()): tmpV[kk] = 0 print " adding new feature : ", newLabel # print tmpV allClinDict[newLabel] = tmpV return (allClinDict) # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# if __name__ == "__main__": if ( (len(sys.argv)!=4) and (len(sys.argv)!=5) ): print " Usage : %s <input TSV> <output TSV> <public/private> [auxName] " % sys.argv[0] print " ERROR -- bad command line arguments " sys.exit(-1) tsvNameIn = sys.argv[1] tsvNameOut = sys.argv[2] ppString = sys.argv[3] if ( len(sys.argv) == 5 ): auxName = sys.argv[4] else: auxName = "aux" # test out readTSV ... 
## tsvName = "coad_read_clinical.27jan.tsv" print " " print " ****************************************************************** " print " reading input file <%s> " % tsvNameIn allClinDict = tsvIO.readTSV(tsvNameIn) print " A " checkBarcodes(allClinDict) # take a look ... (naCounts, otherCounts) = miscClin.lookAtClinDict(allClinDict) if (1): # remove constant-value keys ... allClinDict = miscClin.removeConstantKeys(allClinDict) print " B " checkBarcodes(allClinDict) if (1): # remove uninformative keys ... allClinDict = miscClin.removeUninformativeKeys(allClinDict) print " C " checkBarcodes(allClinDict) # check the tumor stage based on the other T/N/M definitions, update if possible # (and if the original setting was "NA") if (1): allClinDict = checkTumorStage(allClinDict) print " D " checkBarcodes(allClinDict) # new as of 16aug13 ... vital_status strings are inconsistent between # 'living' or 'alive' or 'deceased' or 'dead' ... # --> standard should be "Alive" or "Dead" if (1): allClinDict = checkVitalStatus(allClinDict) print " E " checkBarcodes(allClinDict) # new as of 13sep13 ... makig 'age' a continuous feature that # exactly matches the days_to_birth ... if (1): allClinDict = updateAge(allClinDict) print " F " checkBarcodes(allClinDict) # remap some categorical features to numerical features ... # oh, this shouldn't still be here, should it ??? 15aug2014 if (0): allClinDict = remapCategoricalFeatures(allClinDict) print " G " checkBarcodes(allClinDict) # add the lymphnodes_positive fraction ... allClinDict = computeLymphnodesFraction(allClinDict) print " H " checkBarcodes(allClinDict) # fill in some missing information that we have collected from elsewhere # ... if (0): allClinDict = addMissingInfo(allClinDict) print " I " checkBarcodes(allClinDict) # NEW: look at some of the "days_to_" fields and do some fix-ups ... if (1): allClinDict = addFollowupInfo(allClinDict) print " J " checkBarcodes(allClinDict) # new as of 04dec13 ... checking that vital_status and various days_to_??? # features are consistent ... if (1): allClinDict = checkFollowupInfo(allClinDict) print " K " checkBarcodes(allClinDict) # take a look at the updated dictionary ... (naCounts, otherCounts) = miscClin.lookAtClinDict(allClinDict) if (1): # remove constant-value keys ... allClinDict = miscClin.removeConstantKeys(allClinDict) print " L " checkBarcodes(allClinDict) if (0): # removing this ... 02Feb2012 SMR # filter out keys with too little information ... # or maybe leave nearly everything in ;-) categorical_naFracThresh = 0.90 numerical_naFracThresh = 0.90 classSize_minFracThresh = 0. classSize_maxFracThresh = 0.995 allClinDict = miscClin.filterClinDict(allClinDict, categorical_naFracThresh, numerical_naFracThresh, classSize_minFracThresh, classSize_maxFracThresh) print " M " checkBarcodes(allClinDict) # try to abbreviate clinical feature strings allClinDict = abbrevCategStrings(allClinDict) print " N " checkBarcodes(allClinDict) if (0): # automatically generate indicator features for remaining categorical # features allClinDict = addIndicatorFeatures(allClinDict) print " O " checkBarcodes(allClinDict) # new 10Feb2012 : add pairwise indicator features allClinDict = addPairwiseIndicatorFeatures(allClinDict) print " P " checkBarcodes(allClinDict) # new 09Jan2013 : try to add numeric features that map the non-binary categorical features ... 
# as of 06Aug2014, this is only done for "private" runs if ( ppString == "private" ): allClinDict = addDerivedFeatures(allClinDict, auxName) print " Q " checkBarcodes(allClinDict) # look at pairwise MI ... if (0): print " " print " ****************************************** " print " * looking at pairwise Mutual Information * " print " ****************************************** " print " " miscClin.pairwiseMI(allClinDict, "miNetwork.A.13feb12") # look at the data again and re-pick the 'best' key order ... (naCounts, otherCounts) = miscClin.lookAtClinDict(allClinDict) bestKeyOrder = miscClin.getBestKeyOrder(allClinDict, naCounts) doWriteTSV = 1 if (doWriteTSV): outName = tsvNameOut tsvIO.writeTSV_clinical(allClinDict, bestKeyOrder, outName) if (1): outName = tsvNameOut[:-4] + ".flipNumeric.tsv" tsvIO.writeTSV_clinicalFlipNumeric( allClinDict, bestKeyOrder, outName) print " " print " " print " FINISHED " print " " print " " # -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#<|fim▁end|>
        ## deltaDays is either (days_to_sample_procurement) - (days_to_initial_pathologic_diagnosis)
        ## or just (days_to_sample_procurement)
        if (haveT):
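The clinical-data record above derives numeric features from stage/grade strings: translateRoman() walks a longest-numeral-first list of roman numerals and adds 0.2/0.4/0.6 for A/B/C sub-stages, while translateArabic() does the same for digit-based labels. A minimal self-contained sketch of the roman-numeral case (illustrative only — the helper name and prefix handling are assumptions, and it is Python 3 for brevity although the script above is Python 2):

# stage_to_number is a hypothetical stand-in for translateRoman()
ROMAN = [("viii", 8), ("vii", 7), ("iv", 4), ("ix", 9), ("iii", 3),
         ("ii", 2), ("vi", 6), ("v", 5), ("x", 10), ("i", 1)]  # longest first
SUBSTAGE = {"a": 0.2, "b": 0.4, "c": 0.6}

def stage_to_number(label, prefix="stage"):
    s = label.lower()
    if s.startswith(prefix):
        s = s[len(prefix):]
    s = s.strip("_")
    for numeral, value in ROMAN:
        if s.startswith(numeral):
            rest = s[len(numeral):]
            return value + SUBSTAGE.get(rest, 0.0)
    return "NA"  # no roman numeral found (e.g. "Tis"), as in the script

print(stage_to_number("Stage_IIIB"))  # -> 3.4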
<|file_name|>0109_v370_job_template_organization_field.py<|end_file_name|><|fim▁begin|># Generated by Django 2.2.4 on 2019-08-07 19:56 import awx.main.utils.polymorphic import awx.main.fields from django.db import migrations, models import django.db.models.deletion from awx.main.migrations._rbac import ( rebuild_role_parentage, rebuild_role_hierarchy, migrate_ujt_organization, migrate_ujt_organization_backward, restore_inventory_admins, restore_inventory_admins_backward ) def rebuild_jt_parents(apps, schema_editor): rebuild_role_parentage(apps, schema_editor, models=('jobtemplate',)) class Migration(migrations.Migration): dependencies = [ ('main', '0108_v370_unifiedjob_dependencies_processed'), ] operations = [ # backwards parents and ancestors caching migrations.RunPython(migrations.RunPython.noop, rebuild_jt_parents), # add new organization field for JT and all other unified jobs migrations.AddField( model_name='unifiedjob', name='tmp_organization', field=models.ForeignKey(blank=True, help_text='The organization used to determine access to this unified job.', null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjobs', to='main.Organization'), ), migrations.AddField( model_name='unifiedjobtemplate', name='tmp_organization', field=models.ForeignKey(blank=True, help_text='The organization used to determine access to this template.', null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjobtemplates', to='main.Organization'), ), # while new and old fields exist, copy the organization fields migrations.RunPython(migrate_ujt_organization, migrate_ujt_organization_backward), # with data saved, remove old fields<|fim▁hole|> name='organization', ), migrations.RemoveField( model_name='workflowjobtemplate', name='organization', ), # now, without safely rename the new field without conflicts from old field migrations.RenameField( model_name='unifiedjobtemplate', old_name='tmp_organization', new_name='organization', ), migrations.RenameField( model_name='unifiedjob', old_name='tmp_organization', new_name='organization', ), # parentage of job template roles has genuinely changed at this point migrations.AlterField( model_name='jobtemplate', name='admin_role', field=awx.main.fields.ImplicitRoleField(editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['organization.job_template_admin_role'], related_name='+', to='main.Role'), ), migrations.AlterField( model_name='jobtemplate', name='execute_role', field=awx.main.fields.ImplicitRoleField(editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['admin_role', 'organization.execute_role'], related_name='+', to='main.Role'), ), migrations.AlterField( model_name='jobtemplate', name='read_role', field=awx.main.fields.ImplicitRoleField(editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['organization.auditor_role', 'inventory.organization.auditor_role', 'execute_role', 'admin_role'], related_name='+', to='main.Role'), ), # Re-compute the role parents and ancestors caching migrations.RunPython(rebuild_jt_parents, migrations.RunPython.noop), # for all permissions that will be removed, make them explicit migrations.RunPython(restore_inventory_admins, restore_inventory_admins_backward), ]<|fim▁end|>
        migrations.RemoveField(
            model_name='project',
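The migration record above moves the organization field onto the unified-job base models with a four-step swap: AddField a nullable tmp_organization, RunPython to copy data while both columns exist, RemoveField the old columns, then RenameField tmp_organization into place. The same shape in isolation (a sketch with made-up app/model/field names, not the AWX code):

from django.db import migrations, models

def copy_owner(apps, schema_editor):
    # historical model: both owner and tmp_owner exist at this point
    Book = apps.get_model("library", "Book")
    Book.objects.update(tmp_owner_id=models.F("owner_id"))

class Migration(migrations.Migration):
    dependencies = [("library", "0001_initial")]
    operations = [
        migrations.AddField(
            model_name="book", name="tmp_owner",
            field=models.ForeignKey("library.Owner", null=True,
                                    on_delete=models.SET_NULL)),
        migrations.RunPython(copy_owner, migrations.RunPython.noop),
        migrations.RemoveField(model_name="book", name="owner"),
        migrations.RenameField(model_name="book", old_name="tmp_owner",
                               new_name="owner"),
    ]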
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>#!python # coding=utf-8 # Package level logger import logging logger = logging.getLogger("pocean") logger.addHandler(logging.NullHandler())<|fim▁hole|>__version__ = "1.0.0"<|fim▁end|>
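Each record in this dump follows the same fill-in-the-middle layout: <|file_name|>...<|end_file_name|> names the source file, the text between <|fim▁begin|> and <|fim▁hole|> is the prefix, the text between <|fim▁hole|> and <|fim▁end|> is the suffix, and whatever follows <|fim▁end|> is the target completion for the hole. The small __init__.py record above shows this clearly: the logging setup is the prefix, the __version__ line is the suffix, and the completion shown for it is empty. A sketch of how such a record could be assembled (assumed tooling, not part of the dump):

def make_fim_record(path, text, hole_start, hole_end):
    prefix = text[:hole_start]
    hole = text[hole_start:hole_end]
    suffix = text[hole_end:]
    prompt = ("<|file_name|>%s<|end_file_name|><|fim▁begin|>%s"
              "<|fim▁hole|>%s<|fim▁end|>" % (path, prefix, suffix))
    return prompt, hole  # the hole contents are the training completion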
<|file_name|>keyword_provider_unittest.cc<|end_file_name|><|fim▁begin|>// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/omnibox/browser/keyword_provider.h" #include <stddef.h> #include <map> #include <memory> #include <utility> #include "base/command_line.h" #include "base/metrics/field_trial.h" #include "base/stl_util.h" #include "base/strings/string_util.h" #include "base/strings/utf_string_conversions.h" #include "base/test/task_environment.h" #include "components/omnibox/browser/autocomplete_match.h" #include "components/omnibox/browser/autocomplete_scheme_classifier.h" #include "components/omnibox/browser/mock_autocomplete_provider_client.h" #include "components/omnibox/browser/omnibox_field_trial.h" #include "components/search_engines/search_engines_switches.h" #include "components/search_engines/template_url.h" #include "components/search_engines/template_url_service.h" #include "components/variations/variations_associated_data.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" #include "third_party/metrics_proto/omnibox_event.pb.h" #include "url/gurl.h" #include "url/url_constants.h" using base::ASCIIToUTF16; namespace { class TestingSchemeClassifier : public AutocompleteSchemeClassifier { public: metrics::OmniboxInputType GetInputTypeForScheme( const std::string& scheme) const override { DCHECK_EQ(scheme, base::ToLowerASCII(scheme)); if (scheme == url::kHttpScheme || scheme == url::kHttpsScheme) return metrics::OmniboxInputType::URL; return metrics::OmniboxInputType::EMPTY; } }; } // namespace class KeywordProviderTest : public testing::Test { protected: template<class ResultType> struct MatchType { const ResultType member; bool allowed_to_be_default_match; }; template<class ResultType> struct TestData { const base::string16 input; const size_t num_results; const MatchType<ResultType> output[3]; }; KeywordProviderTest() : kw_provider_(nullptr) { variations::testing::ClearAllVariationParams(); } ~KeywordProviderTest() override {} // Should be called at least once during a test case. This is a separate // function from SetUp() because the client may want to set parameters // (e.g., field trials) before initializing TemplateURLService and the // related internal variables here. 
void SetUpClientAndKeywordProvider(); void TearDown() override; template<class ResultType> void RunTest(TestData<ResultType>* keyword_cases, int num_cases, ResultType AutocompleteMatch::* member); protected: static const TemplateURLService::Initializer kTestData[]; base::test::TaskEnvironment task_environment_; scoped_refptr<KeywordProvider> kw_provider_; std::unique_ptr<MockAutocompleteProviderClient> client_; }; // static const TemplateURLService::Initializer KeywordProviderTest::kTestData[] = { {"aa", "aa.com?foo={searchTerms}", "aa"}, {"aaaa", "http://aaaa/?aaaa=1&b={searchTerms}&c", "aaaa"}, {"aaaaa", "{searchTerms}", "aaaaa"}, {"ab", "bogus URL {searchTerms}", "ab"}, {"weasel", "weasel{searchTerms}weasel", "weasel"}, {"www", " +%2B?={searchTerms}foo ", "www"}, {"nonsub", "http://nonsubstituting-keyword.com/", "nonsub"}, {"z", "{searchTerms}=z", "z"}, {"host.site.com", "http://host.site.com/?q={searchTerms}", "host.site.com"}, {"ignoremelong.domain.com", "http://ignoremelong.domain.com/?q={searchTerms}", "ignoremelong.domain.com"}, {"ignoreme.domain2.com", "http://ignoreme.domain2.com/?q={searchTerms}", "ignoreme.domain2.com"}, {"fooshort.com", "http://fooshort.com/?q={searchTerms}", "fooshort.com"}, {"foolong.co.uk", "http://foolong.co.uk/?q={searchTerms}", "foolong.co.uk"}, {"cleantestv1.com", "http://cleantestv1.com?q={searchTerms}", "clean v1"}, {"cleantestv2.com", "http://cleantestv2.com?q={searchTerms}", "clean v2"}, {"www.cleantestv2.com", "http://www.cleantestv2.com?q={searchTerms}", "www clean v2"}, {"www.cleantestv3.com", "http://www.cleantestv3.com?q={searchTerms}", "www clean v3"}, {"http://cleantestv4.com", "http://cleantestv4.com?q={searchTerms}", "http clean v4"}, {"cleantestv5.com", "http://cleantestv5.com?q={searchTerms}", "clean v5"}, {"http://cleantestv5.com", "http://cleantestv5.com?q={searchTerms}", "http clean v5"}, {"cleantestv6:", "http://cleantestv6.com?q={searchTerms}", "clean v6"}, {"cleantestv7/", "http://cleantestv7slash.com?q={searchTerms}", "clean v7 slash"}, {"cleantestv8/", "http://cleantestv8.com?q={searchTerms}", "clean v8"}, {"cleantestv8", "http://cleantestv8slash.com?q={searchTerms}", "clean v8 slash"}, }; void KeywordProviderTest::SetUpClientAndKeywordProvider() { client_.reset(new MockAutocompleteProviderClient()); client_->set_template_url_service( std::make_unique<TemplateURLService>(kTestData, base::size(kTestData))); kw_provider_ = new KeywordProvider(client_.get(), nullptr); } void KeywordProviderTest::TearDown() { client_.reset(); kw_provider_ = nullptr; } template<class ResultType> void KeywordProviderTest::RunTest(TestData<ResultType>* keyword_cases, int num_cases, ResultType AutocompleteMatch::* member) { ACMatches matches; for (int i = 0; i < num_cases; ++i) { SCOPED_TRACE(keyword_cases[i].input); AutocompleteInput input(keyword_cases[i].input, metrics::OmniboxEventProto::OTHER, TestingSchemeClassifier()); kw_provider_->Start(input, false); EXPECT_TRUE(kw_provider_->done()); matches = kw_provider_->matches(); ASSERT_EQ(keyword_cases[i].num_results, matches.size()); for (size_t j = 0; j < matches.size(); ++j) { EXPECT_EQ(keyword_cases[i].output[j].member, matches[j].*member); EXPECT_EQ(keyword_cases[i].output[j].allowed_to_be_default_match, matches[j].allowed_to_be_default_match); } } } TEST_F(KeywordProviderTest, Edit) { const MatchType<base::string16> kEmptyMatch = { base::string16(), false }; TestData<base::string16> edit_cases[] = { // Searching for a nonexistent prefix should give nothing. 
{ASCIIToUTF16("Not Found"), 0, {kEmptyMatch, kEmptyMatch, kEmptyMatch}}, {ASCIIToUTF16("aaaaaNot Found"), 0, {kEmptyMatch, kEmptyMatch, kEmptyMatch}}, // Check that tokenization only collapses whitespace between first tokens, // no-query-input cases have a space appended, and action is not escaped. {ASCIIToUTF16("z"), 1, {{ASCIIToUTF16("z "), true}, kEmptyMatch, kEmptyMatch}}, {ASCIIToUTF16("z \t"), 1, {{ASCIIToUTF16("z "), true}, kEmptyMatch, kEmptyMatch}}, // Check that exact, substituting keywords with a verbatim search term // don't generate a result. (These are handled by SearchProvider.) {ASCIIToUTF16("z foo"), 0, {kEmptyMatch, kEmptyMatch, kEmptyMatch}}, {ASCIIToUTF16("z a b c++"), 0, {kEmptyMatch, kEmptyMatch, kEmptyMatch}}, // Matches should be limited to three, and sorted in quality order, not // alphabetical. {ASCIIToUTF16("aaa"), 2, {{ASCIIToUTF16("aaaa "), false}, {ASCIIToUTF16("aaaaa "), false}, kEmptyMatch}}, {ASCIIToUTF16("a 1 2 3"), 3, {{ASCIIToUTF16("aa 1 2 3"), false}, {ASCIIToUTF16("ab 1 2 3"), false}, {ASCIIToUTF16("aaaa 1 2 3"), false}}}, {ASCIIToUTF16("www.a"), 3, {{ASCIIToUTF16("aa "), false}, {ASCIIToUTF16("ab "), false}, {ASCIIToUTF16("aaaa "), false}}}, {ASCIIToUTF16("foo hello"), 2, {{ASCIIToUTF16("fooshort.com hello"), false}, {ASCIIToUTF16("foolong.co.uk hello"), false}, kEmptyMatch}}, // Exact matches should prevent returning inexact matches. Also, the // verbatim query for this keyword match should not be returned. (It's // returned by SearchProvider.) {ASCIIToUTF16("aaaa foo"), 0, {kEmptyMatch, kEmptyMatch, kEmptyMatch}}, {ASCIIToUTF16("www.aaaa foo"), 0, {kEmptyMatch, kEmptyMatch, kEmptyMatch}}, // Matches should be retrieved by typing the prefix of the keyword, not // the // domain name. {ASCIIToUTF16("host foo"), 1, {{ASCIIToUTF16("host.site.com foo"), false}, kEmptyMatch, kEmptyMatch}}, {ASCIIToUTF16("host.site foo"), 1, {{ASCIIToUTF16("host.site.com foo"), false}, kEmptyMatch, kEmptyMatch}}, {ASCIIToUTF16("site foo"), 0, {kEmptyMatch, kEmptyMatch, kEmptyMatch}}, // Clean up keyword input properly. "http" and "https" are the only // allowed schemes. {ASCIIToUTF16("www"), 1, {{ASCIIToUTF16("www "), true}, kEmptyMatch, kEmptyMatch}}, {ASCIIToUTF16("www."), 0, {kEmptyMatch, kEmptyMatch, kEmptyMatch}}, // In this particular example, stripping the "www." from "www.FOO" means // we can allow matching against keywords that explicitly start with // "FOO", even if "FOO" happens to be "www". It's a little odd yet it // seems reasonable. {ASCIIToUTF16("www.w w"), 3, {{ASCIIToUTF16("www w"), false}, {ASCIIToUTF16("weasel w"), false}, {ASCIIToUTF16("www.cleantestv2.com w"), false}}}, {ASCIIToUTF16("http://www"), 1, {{ASCIIToUTF16("www "), true}, kEmptyMatch, kEmptyMatch}}, {ASCIIToUTF16("http://www."), 0, {kEmptyMatch, kEmptyMatch, kEmptyMatch}}, {ASCIIToUTF16("ftp: blah"), 0, {kEmptyMatch, kEmptyMatch, kEmptyMatch}}, {ASCIIToUTF16("mailto:z"), 0, {kEmptyMatch, kEmptyMatch, kEmptyMatch}}, {ASCIIToUTF16("ftp://z"), 0, {kEmptyMatch, kEmptyMatch, kEmptyMatch}}, {ASCIIToUTF16("https://z"), 1, {{ASCIIToUTF16("z "), true}, kEmptyMatch, kEmptyMatch}}, // Non-substituting keywords, whether typed fully or not // should not add a space. 
{ASCIIToUTF16("nonsu"), 1, {{ASCIIToUTF16("nonsub"), false}, kEmptyMatch, kEmptyMatch}}, {ASCIIToUTF16("nonsub"), 1, {{ASCIIToUTF16("nonsub"), true}, kEmptyMatch, kEmptyMatch}}, }; SetUpClientAndKeywordProvider(); RunTest<base::string16>(edit_cases, base::size(edit_cases), &AutocompleteMatch::fill_into_edit); } TEST_F(KeywordProviderTest, DomainMatches) { const MatchType<base::string16> kEmptyMatch = { base::string16(), false }; TestData<base::string16> edit_cases[] = { // Searching for a nonexistent prefix should give nothing. { ASCIIToUTF16("Not Found"), 0, { kEmptyMatch, kEmptyMatch, kEmptyMatch } }, { ASCIIToUTF16("aaaaaNot Found"), 0, { kEmptyMatch, kEmptyMatch, kEmptyMatch } }, // Matches should be limited to three and sorted in quality order. // This order depends on whether we're using the pre-domain-name text // for matching--when matching the domain, we sort by the length of the<|fim▁hole|> // domain, not the length of the whole keyword. { ASCIIToUTF16("ignore foo"), 2, { { ASCIIToUTF16("ignoreme.domain2.com foo"), false }, { ASCIIToUTF16("ignoremelong.domain.com foo"), false }, kEmptyMatch } }, { ASCIIToUTF16("dom foo"), 2, { { ASCIIToUTF16("ignoremelong.domain.com foo"), false }, { ASCIIToUTF16("ignoreme.domain2.com foo"), false }, kEmptyMatch } }, // Matches should be retrieved by typing the domain name, not only // a prefix to the keyword. { ASCIIToUTF16("host foo"), 1, { { ASCIIToUTF16("host.site.com foo"), false }, kEmptyMatch, kEmptyMatch } }, { ASCIIToUTF16("host.site foo"), 1, { { ASCIIToUTF16("host.site.com foo"), false }, kEmptyMatch, kEmptyMatch } }, { ASCIIToUTF16("site foo"), 1, { { ASCIIToUTF16("host.site.com foo"), false }, kEmptyMatch, kEmptyMatch } }, }; // Add a rule enabling matching in the domain name of keywords (i.e., // non-prefix matching). { std::map<std::string, std::string> params; params[OmniboxFieldTrial::kKeywordRequiresPrefixMatchRule] = "false"; ASSERT_TRUE(variations::AssociateVariationParams( OmniboxFieldTrial::kBundledExperimentFieldTrialName, "A", params)); } base::FieldTrialList::CreateFieldTrial( OmniboxFieldTrial::kBundledExperimentFieldTrialName, "A"); SetUpClientAndKeywordProvider(); RunTest<base::string16>(edit_cases, base::size(edit_cases), &AutocompleteMatch::fill_into_edit); } TEST_F(KeywordProviderTest, IgnoreRegistryForScoring) { const MatchType<base::string16> kEmptyMatch = { base::string16(), false }; TestData<base::string16> edit_cases[] = { // Matches should be limited to three and sorted in quality order. // When ignoring the registry length, this order of suggestions should // result (sorted by keyword length sans registry). The "Edit" test case // has this exact test for when not ignoring the registry to check that // the other order (shorter full keyword) results there. { ASCIIToUTF16("foo hello"), 2, { { ASCIIToUTF16("foolong.co.uk hello"), false }, { ASCIIToUTF16("fooshort.com hello"), false }, kEmptyMatch } }, // Keywords that don't have full hostnames should keep the same order // as normal. { ASCIIToUTF16("aaa"), 2, { { ASCIIToUTF16("aaaa "), false }, { ASCIIToUTF16("aaaaa "), false }, kEmptyMatch } }, { ASCIIToUTF16("a 1 2 3"), 3, { { ASCIIToUTF16("aa 1 2 3"), false }, { ASCIIToUTF16("ab 1 2 3"), false }, { ASCIIToUTF16("aaaa 1 2 3"), false } } }, { ASCIIToUTF16("www.a"), 3, { { ASCIIToUTF16("aa "), false }, { ASCIIToUTF16("ab "), false }, { ASCIIToUTF16("aaaa "), false } } }, }; // Add a rule to make matching in the registry portion of a keyword // unimportant. 
{ std::map<std::string, std::string> params; params[OmniboxFieldTrial::kKeywordRequiresRegistryRule] = "false"; ASSERT_TRUE(variations::AssociateVariationParams( OmniboxFieldTrial::kBundledExperimentFieldTrialName, "A", params)); } base::FieldTrialList::CreateFieldTrial( OmniboxFieldTrial::kBundledExperimentFieldTrialName, "A"); SetUpClientAndKeywordProvider(); RunTest<base::string16>(edit_cases, base::size(edit_cases), &AutocompleteMatch::fill_into_edit); } TEST_F(KeywordProviderTest, DISABLED_URL) { const MatchType<GURL> kEmptyMatch = { GURL(), false }; TestData<GURL> url_cases[] = { // No query input -> empty destination URL. {ASCIIToUTF16("z"), 1, {{GURL(), true}, kEmptyMatch, kEmptyMatch}}, {ASCIIToUTF16("z \t"), 1, {{GURL(), true}, kEmptyMatch, kEmptyMatch}}, // Check that tokenization only collapses whitespace between first tokens // and query input, but not rest of URL, is escaped. {ASCIIToUTF16("w bar +baz"), 3, {{GURL(" +%2B?=bar+%2Bbazfoo "), false}, {GURL("bar+%2Bbaz=z"), false}, {GURL("http://www.cleantestv2.com/?q=bar+%2Bbaz"), false}}}, // Substitution should work with various locations of the "%s". {ASCIIToUTF16("aaa 1a2b"), 2, {{GURL("http://aaaa/?aaaa=1&b=1a2b&c"), false}, {GURL("1a2b"), false}, kEmptyMatch}}, {ASCIIToUTF16("a 1 2 3"), 3, {{GURL("aa.com?foo=1+2+3"), false}, {GURL("bogus URL 1+2+3"), false}, {GURL("http://aaaa/?aaaa=1&b=1+2+3&c"), false}}}, {ASCIIToUTF16("www.w w"), 3, {{GURL(" +%2B?=wfoo "), false}, {GURL("weaselwweasel"), false}, {GURL("http://www.cleantestv2.com/?q=w"), false}}}, }; SetUpClientAndKeywordProvider(); RunTest<GURL>(url_cases, base::size(url_cases), &AutocompleteMatch::destination_url); } TEST_F(KeywordProviderTest, Contents) { const MatchType<base::string16> kEmptyMatch = { base::string16(), false }; TestData<base::string16> contents_cases[] = { // No query input -> substitute "<Type search term>" into contents. {ASCIIToUTF16("z"), 1, {{ASCIIToUTF16("<Type search term>"), true}, kEmptyMatch, kEmptyMatch}}, {ASCIIToUTF16("z \t"), 1, {{ASCIIToUTF16("<Type search term>"), true}, kEmptyMatch, kEmptyMatch}}, // Exact keyword matches with remaining text should return nothing. {ASCIIToUTF16("www.www www"), 0, {kEmptyMatch, kEmptyMatch, kEmptyMatch}}, {ASCIIToUTF16("z a b c++"), 0, {kEmptyMatch, kEmptyMatch, kEmptyMatch}}, // Exact keyword matches with remaining text when the keyword is an // extension keyword should return something. This is tested in // chrome/browser/extensions/api/omnibox/omnibox_apitest.cc's // in OmniboxApiTest's Basic test. // There are two keywords that start with "aaa". Suggestions will be // disambiguated by the description. We do not test the description value // here because KeywordProvider doesn't set descriptions; these are // populated later by AutocompleteController. {ASCIIToUTF16("aaa"), 2, {{ASCIIToUTF16("<Type search term>"), false}, {ASCIIToUTF16("<Type search term>"), false}, kEmptyMatch}}, // When there is a search string, simply display it. {ASCIIToUTF16("www.w w"), 3, {{ASCIIToUTF16("w"), false}, {ASCIIToUTF16("w"), false}, {ASCIIToUTF16("w"), false}}}, // Also, check that tokenization only collapses whitespace between first // tokens and contents are not escaped or unescaped. 
{ASCIIToUTF16("a 1 2+ 3"), 3, {{ASCIIToUTF16("1 2+ 3"), false}, {ASCIIToUTF16("1 2+ 3"), false}, {ASCIIToUTF16("1 2+ 3"), false}}}, }; SetUpClientAndKeywordProvider(); RunTest<base::string16>(contents_cases, base::size(contents_cases), &AutocompleteMatch::contents); } TEST_F(KeywordProviderTest, AddKeyword) { SetUpClientAndKeywordProvider(); TemplateURLData data; data.SetShortName(ASCIIToUTF16("Test")); base::string16 keyword(ASCIIToUTF16("foo")); data.SetKeyword(keyword); data.SetURL("http://www.google.com/foo?q={searchTerms}"); TemplateURL* template_url = client_->GetTemplateURLService()->Add( std::make_unique<TemplateURL>(data)); ASSERT_TRUE( template_url == client_->GetTemplateURLService()->GetTemplateURLForKeyword(keyword)); } TEST_F(KeywordProviderTest, RemoveKeyword) { SetUpClientAndKeywordProvider(); TemplateURLService* template_url_service = client_->GetTemplateURLService(); base::string16 url(ASCIIToUTF16("http://aaaa/?aaaa=1&b={searchTerms}&c")); template_url_service->Remove( template_url_service->GetTemplateURLForKeyword(ASCIIToUTF16("aaaa"))); ASSERT_TRUE(template_url_service->GetTemplateURLForKeyword( ASCIIToUTF16("aaaa")) == nullptr); } TEST_F(KeywordProviderTest, GetKeywordForInput) { SetUpClientAndKeywordProvider(); EXPECT_EQ(ASCIIToUTF16("aa"), kw_provider_->GetKeywordForText(ASCIIToUTF16("aa"))); EXPECT_EQ(base::string16(), kw_provider_->GetKeywordForText(ASCIIToUTF16("aafoo"))); EXPECT_EQ(base::string16(), kw_provider_->GetKeywordForText(ASCIIToUTF16("aa foo"))); EXPECT_EQ( ASCIIToUTF16("cleantestv1.com"), kw_provider_->GetKeywordForText(ASCIIToUTF16("http://cleantestv1.com"))); EXPECT_EQ( ASCIIToUTF16("cleantestv1.com"), kw_provider_->GetKeywordForText(ASCIIToUTF16("www.cleantestv1.com"))); EXPECT_EQ(ASCIIToUTF16("cleantestv1.com"), kw_provider_->GetKeywordForText(ASCIIToUTF16("cleantestv1.com/"))); EXPECT_EQ(ASCIIToUTF16("cleantestv1.com"), kw_provider_->GetKeywordForText( ASCIIToUTF16("https://www.cleantestv1.com/"))); EXPECT_EQ(ASCIIToUTF16("cleantestv2.com"), kw_provider_->GetKeywordForText(ASCIIToUTF16("cleantestv2.com"))); EXPECT_EQ( ASCIIToUTF16("www.cleantestv2.com"), kw_provider_->GetKeywordForText(ASCIIToUTF16("www.cleantestv2.com"))); EXPECT_EQ(ASCIIToUTF16("cleantestv2.com"), kw_provider_->GetKeywordForText(ASCIIToUTF16("cleantestv2.com/"))); EXPECT_EQ( ASCIIToUTF16("www.cleantestv3.com"), kw_provider_->GetKeywordForText(ASCIIToUTF16("www.cleantestv3.com"))); EXPECT_EQ(base::string16(), kw_provider_->GetKeywordForText(ASCIIToUTF16("cleantestv3.com"))); EXPECT_EQ( ASCIIToUTF16("http://cleantestv4.com"), kw_provider_->GetKeywordForText(ASCIIToUTF16("http://cleantestv4.com"))); EXPECT_EQ(base::string16(), kw_provider_->GetKeywordForText(ASCIIToUTF16("cleantestv4.com"))); EXPECT_EQ(ASCIIToUTF16("cleantestv5.com"), kw_provider_->GetKeywordForText(ASCIIToUTF16("cleantestv5.com"))); EXPECT_EQ( ASCIIToUTF16("http://cleantestv5.com"), kw_provider_->GetKeywordForText(ASCIIToUTF16("http://cleantestv5.com"))); EXPECT_EQ(ASCIIToUTF16("cleantestv6:"), kw_provider_->GetKeywordForText(ASCIIToUTF16("cleantestv6:"))); EXPECT_EQ(base::string16(), kw_provider_->GetKeywordForText(ASCIIToUTF16("cleantestv6"))); EXPECT_EQ(ASCIIToUTF16("cleantestv7/"), kw_provider_->GetKeywordForText(ASCIIToUTF16("cleantestv7/"))); EXPECT_EQ(base::string16(), kw_provider_->GetKeywordForText(ASCIIToUTF16("cleantestv7"))); EXPECT_EQ(ASCIIToUTF16("cleantestv8/"), kw_provider_->GetKeywordForText(ASCIIToUTF16("cleantestv8/"))); EXPECT_EQ(ASCIIToUTF16("cleantestv8"), 
kw_provider_->GetKeywordForText(ASCIIToUTF16("cleantestv8"))); } TEST_F(KeywordProviderTest, GetSubstitutingTemplateURLForInput) { struct { const std::string text; const size_t cursor_position; const bool allow_exact_keyword_match; const std::string expected_url; const std::string updated_text; const size_t updated_cursor_position; } cases[] = { { "foo", base::string16::npos, true, "", "foo", base::string16::npos }, { "aa foo", base::string16::npos, true, "aa.com?foo={searchTerms}", "foo", base::string16::npos }, // Cursor adjustment. { "aa foo", base::string16::npos, true, "aa.com?foo={searchTerms}", "foo", base::string16::npos }, { "aa foo", 4u, true, "aa.com?foo={searchTerms}", "foo", 1u }, // Cursor at the end. { "aa foo", 6u, true, "aa.com?foo={searchTerms}", "foo", 3u }, // Cursor before the first character of the remaining text. { "aa foo", 3u, true, "aa.com?foo={searchTerms}", "foo", 0u }, // Trailing space. { "aa foo ", 7u, true, "aa.com?foo={searchTerms}", "foo ", 4u }, // Trailing space without remaining text, cursor in the middle. { "aa ", 3u, true, "aa.com?foo={searchTerms}", "", base::string16::npos }, // Trailing space without remaining text, cursor at the end. { "aa ", 4u, true, "aa.com?foo={searchTerms}", "", base::string16::npos }, // Extra space after keyword, cursor at the end. { "aa foo ", 8u, true, "aa.com?foo={searchTerms}", "foo ", 4u }, // Extra space after keyword, cursor in the middle. { "aa foo ", 3u, true, "aa.com?foo={searchTerms}", "foo ", 0 }, // Extra space after keyword, no trailing space, cursor at the end. { "aa foo", 7u, true, "aa.com?foo={searchTerms}", "foo", 3u }, // Extra space after keyword, no trailing space, cursor in the middle. { "aa foo", 5u, true, "aa.com?foo={searchTerms}", "foo", 1u }, // Disallow exact keyword match. { "aa foo", base::string16::npos, false, "", "aa foo", base::string16::npos }, }; SetUpClientAndKeywordProvider(); for (size_t i = 0; i < base::size(cases); i++) { AutocompleteInput input( ASCIIToUTF16(cases[i].text), cases[i].cursor_position, metrics::OmniboxEventProto::OTHER, TestingSchemeClassifier()); input.set_allow_exact_keyword_match(cases[i].allow_exact_keyword_match); const TemplateURL* url = KeywordProvider::GetSubstitutingTemplateURLForInput( client_->GetTemplateURLService(), &input); if (cases[i].expected_url.empty()) EXPECT_FALSE(url); else EXPECT_EQ(cases[i].expected_url, url->url()); EXPECT_EQ(ASCIIToUTF16(cases[i].updated_text), input.text()); EXPECT_EQ(cases[i].updated_cursor_position, input.cursor_position()); } } // If extra query params are specified on the command line, they should be // reflected (only) in the default search provider's destination URL. TEST_F(KeywordProviderTest, ExtraQueryParams) { base::CommandLine::ForCurrentProcess()->AppendSwitchASCII( switches::kExtraSearchQueryParams, "a=b"); TestData<GURL> url_cases[] = { { ASCIIToUTF16("a 1 2 3"), 3, { { GURL("aa.com?a=b&foo=1+2+3"), false }, { GURL("bogus URL 1+2+3"), false }, { GURL("http://aaaa/?aaaa=1&b=1+2+3&c"), false } } }, }; SetUpClientAndKeywordProvider(); RunTest<GURL>(url_cases, base::size(url_cases), &AutocompleteMatch::destination_url); } TEST_F(KeywordProviderTest, DoesNotProvideMatchesOnFocus) { SetUpClientAndKeywordProvider(); AutocompleteInput input(ASCIIToUTF16("aaa"), metrics::OmniboxEventProto::OTHER, TestingSchemeClassifier()); input.set_from_omnibox_focus(true); kw_provider_->Start(input, false); ASSERT_TRUE(kw_provider_->matches().empty()); }<|fim▁end|>
<|file_name|>encoder_test.go<|end_file_name|><|fim▁begin|>package toolbox_test import ( "bytes"<|fim▁hole|>) func TestEncoderFactory(t *testing.T) { buffer := new(bytes.Buffer) assert.NotNil(t, toolbox.NewJSONEncoderFactory().Create(buffer)) } func TestMarshalEncoderFactory(t *testing.T) { buffer := new(bytes.Buffer) encoder := toolbox.NewMarshalerEncoderFactory().Create(buffer) foo := &Foo200{"abc"} err := encoder.Encode(foo) assert.Nil(t, err) assert.Equal(t, "abc", string(buffer.Bytes())) err = encoder.Encode(&Foo201{}) assert.NotNil(t, err) } type Foo200 struct { Attr string } func (m *Foo200) Marshal() ([]byte, error) { return []byte(m.Attr), nil } type Foo201 struct { Attr string }<|fim▁end|>
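The cleantestv1..v8 fixtures in the keyword_provider_unittest.cc record above pin down how typed text is cleaned before keyword lookup: an http:// or https:// scheme, a leading www., and a trailing slash may each be stripped, while a keyword registered with those parts intact still wins via the least-stripped match. A rough Python model of that behavior (inferred from the test expectations, not the Chromium implementation):

def find_keyword(keywords, text):
    candidates = [text]
    for scheme in ("http://", "https://"):
        if text.startswith(scheme):
            text = text[len(scheme):]
            candidates.append(text)
            break
    if text.startswith("www."):
        text = text[len("www."):]
        candidates.append(text)
    if text.endswith("/"):
        candidates.append(text[:-1])
    for candidate in candidates:
        if candidate in keywords:
            return candidate  # least-stripped match wins
    return None

registered = {"cleantestv1.com", "www.cleantestv2.com", "cleantestv2.com"}
assert find_keyword(registered, "https://www.cleantestv1.com/") == "cleantestv1.com"
assert find_keyword(registered, "www.cleantestv2.com") == "www.cleantestv2.com"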
"testing" "github.com/stretchr/testify/assert" "github.com/viant/toolbox"
<|file_name|>eo_view.rs<|end_file_name|><|fim▁begin|>// std imports use std::mem; // external imports use num::traits::Num; // local imports use algebra::structure::MagmaBase; use super::eo_traits::{ERO, ECO}; use matrix::view::MatrixView; use matrix::traits::{Shape, MatrixBuffer, Strided}; /// Implementation of Elementary row operations. impl<'a, T:MagmaBase + Num> ERO<T> for MatrixView<'a, T> { /// Row scaling by a factor and adding to another row. /// r_i = r_i + k * r_j /// The j-th row can be outside the view also. /// This is the row relative to the start of the view. #[inline] fn ero_scale_add(&mut self, i : usize, j : isize, scale : T )-> &mut MatrixView<'a, T> { debug_assert! (i < self.num_rows());<|fim▁hole|> let m = self.matrix(); // Compute j-th row in m (by doing offset) let j = j + (self.start_row() as isize); debug_assert! (j >= 0); let j = j as usize; debug_assert!(j < m.num_rows()); let ptr = m.as_ptr(); // I am allowing modification of the underlying buffer let ptr : *mut T = unsafe { mem::transmute(ptr) }; let sc = self.start_col(); // Compute initial offsets let mut offset_a = self.cell_to_offset(i, 0); let mut offset_b = m.cell_to_offset(j, sc); let stride_a = self.stride() as isize; let stride_b = m.stride() as isize; for _ in 0..self.num_cols(){ unsafe { let va = *ptr.offset(offset_a); let vb = *ptr.offset(offset_b); *ptr.offset(offset_a) = va + scale * vb; } // Update offsets offset_a += stride_a; offset_b += stride_b; } self } } /// Implementation of Elementary column operations. impl<'a, T:MagmaBase + Num> ECO<T> for MatrixView<'a, T> { /// Column scaling by a factor and adding to another column. /// c_i = c_i + k * c_j /// The j-th column can be outside the view also. /// This is the column relative to the start of the view. #[inline] fn eco_scale_add(&mut self, i : usize, j : isize, scale : T )-> &mut MatrixView<'a, T> { debug_assert! (i < self.num_cols()); let m = self.matrix(); // Compute j-th column in m (by doing offset) let j = j + (self.start_col() as isize); debug_assert! (j >= 0); let j = j as usize; debug_assert!(j < m.num_cols()); let ptr = m.as_ptr(); // I am allowing modification of the underlying buffer let ptr : *mut T = unsafe { mem::transmute(ptr) }; let sr = self.start_row(); // Compute initial offsets let mut offset_a = self.cell_to_offset(0, i); let mut offset_b = m.cell_to_offset(sr, j); for _ in 0..self.num_rows(){ unsafe { let va = *ptr.offset(offset_a); let vb = *ptr.offset(offset_b); *ptr.offset(offset_a) = va + scale * vb; } // Update offsets offset_a += 1; offset_b += 1; } self } } /****************************************************** * * Unit tests * *******************************************************/ #[cfg(test)] mod test{ //use super::*; } /****************************************************** * * Bench marks * *******************************************************/ #[cfg(test)] mod bench{ //extern crate test; //use self::test::Bencher; //use super::*; }<|fim▁end|>
<|file_name|>cli_config.py<|end_file_name|><|fim▁begin|># # Copyright 2018 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.plugins.action.network import ActionModule as ActionNetworkModule <|fim▁hole|> self._config_module = True if self._play_context.connection != 'network_cli': return {'failed': True, 'msg': 'Connection type %s is not valid for cli_config module' % self._play_context.connection} return super(ActionModule, self).run(task_vars=task_vars)<|fim▁end|>
class ActionModule(ActionNetworkModule): def run(self, tmp=None, task_vars=None): del tmp # tmp no longer has any effect
<|file_name|>GameObjectAI.cpp<|end_file_name|><|fim▁begin|>/* * Copyright (C) 2016+ AzerothCore <www.azerothcore.org>, released under GNU GPL v2 license: http://github.com/azerothcore/azerothcore-wotlk/LICENSE-GPL2 * Copyright (C) 2008-2016 TrinityCore <http://www.trinitycore.org/> * Copyright (C) 2005-2009 MaNGOS <http://getmangos.com/> */ #include "GameObjectAI.h" //GameObjectAI::GameObjectAI(GameObject* g) : go(g) {} int GameObjectAI::Permissible(const GameObject* go) {<|fim▁hole|> return PERMIT_BASE_NO; } NullGameObjectAI::NullGameObjectAI(GameObject* g) : GameObjectAI(g) {}<|fim▁end|>
if (go->GetAIName() == "GameObjectAI") return PERMIT_BASE_SPECIAL;
<|file_name|>racetrackview.py<|end_file_name|><|fim▁begin|>import kivy kivy.require('1.9.1') from kivy.uix.label import Label from kivy.uix.boxlayout import BoxLayout from kivy.uix.widget import Widget from kivy.uix.scatter import Scatter from kivy.app import Builder from kivy.metrics import dp from kivy.graphics import Color, Line from autosportlabs.racecapture.geo.geopoint import GeoPoint from autosportlabs.uix.track.trackmap import TrackMapView from utils import * Builder.load_file('autosportlabs/uix/track/racetrackview.kv') class RaceTrackView(BoxLayout): def __init__(self, **kwargs): super(RaceTrackView, self).__init__(**kwargs) def loadTrack(self, track): self.initMap(track) def initMap(self, track): self.ids.trackmap.setTrackPoints(track.map_points) def remove_reference_mark(self, key): self.ids.trackmap.remove_marker(key) def add_reference_mark(self, key, color): trackmap = self.ids.trackmap if trackmap.get_marker(key) is None: trackmap.add_marker(key, color) def update_reference_mark(self, key, geo_point): self.ids.trackmap.update_marker(key, geo_point) def add_map_path(self, key, path, color): self.ids.trackmap.add_path(key, path, color) def remove_map_path(self, key): self.ids.trackmap.remove_path(key) def add_heat_values(self, key, heat_values): self.ids.trackmap.add_heat_values(key, heat_values) <|fim▁hole|><|fim▁end|>
def remove_heat_values(self, key): self.ids.trackmap.remove_heat_values(key)
<|file_name|>PersistentCollection.java<|end_file_name|><|fim▁begin|>/* * Hibernate, Relational Persistence for Idiomatic Java * * Copyright (c) 2008-2011, Red Hat Inc. or third-party contributors as * indicated by the @author tags or express copyright attribution * statements applied by the authors. All third-party contributions are * distributed under license by Red Hat Inc. * * This copyrighted material is made available to anyone wishing to use, modify, * copy, or redistribute it subject to the terms and conditions of the GNU * Lesser General Public License, as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License * for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this distribution; if not, write to: * Free Software Foundation, Inc. * 51 Franklin Street, Fifth Floor * Boston, MA 02110-1301 USA */ package org.hibernate.collection.spi; import java.io.Serializable; import java.sql.ResultSet; import java.sql.SQLException; import java.util.Collection; import java.util.Iterator; import org.hibernate.HibernateException; import org.hibernate.engine.spi.SessionImplementor; import org.hibernate.loader.CollectionAliases; import org.hibernate.persister.collection.CollectionPersister; import org.hibernate.type.Type; /** * Persistent collections are treated as value objects by Hibernate. * ie. they have no independent existence beyond the object holding * a reference to them. Unlike instances of entity classes, they are * automatically deleted when unreferenced and automatically become * persistent when held by a persistent object. Collections can be * passed between different objects (change "roles") and this might * cause their elements to move from one database table to another.<br> * <br> * Hibernate "wraps" a java collection in an instance of * PersistentCollection. This mechanism is designed to support * tracking of changes to the collection's persistent state and * lazy instantiation of collection elements. The downside is that * only certain abstract collection types are supported and any * extra semantics are lost<br> * <br> * Applications should <em>never</em> use classes in this package * directly, unless extending the "framework" here.<br> * <br> * Changes to <em>structure</em> of the collection are recorded by the * collection calling back to the session. Changes to mutable * elements (ie. composite elements) are discovered by cloning their * state when the collection is initialized and comparing at flush * time. * * @author Gavin King */ public interface PersistentCollection { /** * Get the owning entity. Note that the owner is only * set during the flush cycle, and when a new collection * wrapper is created while loading an entity. * * @return The owner */ public Object getOwner(); /** * Set the reference to the owning entity * * @param entity The owner */ public void setOwner(Object entity); /** * Is the collection empty? (don't try to initialize the collection) * * @return {@code false} if the collection is non-empty; {@code true} otherwise. */ public boolean empty(); /** * After flushing, re-init snapshot state. * * @param key The collection instance key (fk value). 
* @param role The collection role * @param snapshot The snapshot state */ public void setSnapshot(Serializable key, String role, Serializable snapshot); /** * After flushing, clear any "queued" additions, since the * database state is now synchronized with the memory state. */ public void postAction(); /** * Return the user-visible collection (or array) instance * * @return The underlying collection/array */ public Object getValue(); /** * Called just before reading any rows from the JDBC result set */ public void beginRead(); /** * Called after reading all rows from the JDBC result set * * @return Whether to end the read. */ public boolean endRead(); /** * Called after initializing from cache * * @return ?? */ public boolean afterInitialize(); /** * Could the application possibly have a direct reference to * the underlying collection implementation? * * @return {@code true} indicates that the application might have access to the underlying collection/array. */ public boolean isDirectlyAccessible(); /** * Disassociate this collection from the given session. * * @param currentSession The session we are disassociating from. Used for validations. * * @return true if this was currently associated with the given session */ public boolean unsetSession(SessionImplementor currentSession); /** * Associate the collection with the given session. * * @param session The session to associate with * * @return false if the collection was already associated with the session * * @throws HibernateException if the collection was already associated * with another open session */ public boolean setCurrentSession(SessionImplementor session) throws HibernateException; /** * Read the state of the collection from a disassembled cached value * * @param persister The collection persister * @param disassembled The disassembled cached state * @param owner The collection owner */ public void initializeFromCache(CollectionPersister persister, Serializable disassembled, Object owner); /** * Iterate all collection entries, during update of the database * * @param persister The collection persister. * * @return The iterator */ public Iterator entries(CollectionPersister persister); /** * Read a row from the JDBC result set * * @param rs The JDBC ResultSet * @param role The collection role * @param descriptor The aliases used for the columns making up the collection * @param owner The collection owner * * @return The read object * * @throws HibernateException Generally indicates a problem resolving data read from the ResultSet * @throws SQLException Indicates a problem accessing the ResultSet */ public Object readFrom(ResultSet rs, CollectionPersister role, CollectionAliases descriptor, Object owner) throws HibernateException, SQLException; /** * Get the identifier of the given collection entry. This refers to the collection identifier, not the * identifier of the (possibly) entity elements. This is only valid for invocation on the * {@code idbag} collection. * * @param entry The collection entry/element * @param i The assumed identifier (?) * * @return The identifier value */ public Object getIdentifier(Object entry, int i); /** * Get the index of the given collection entry * * @param entry The collection entry/element * @param i The assumed index * @param persister it was more elegant before we added this... * * @return The index value */ public Object getIndex(Object entry, int i, CollectionPersister persister); /** * Get the value of the given collection entry. 
Generally the given entry parameter value will just be returned. * Might get a different value for a duplicate entries in a Set. * * @param entry The object instance for which to get the collection element instance. * * @return The corresponding object that is part of the collection elements. */ public Object getElement(Object entry); /** * Get the snapshot value of the given collection entry * * @param entry The entry * @param i The index * * @return The snapshot state for that element */ public Object getSnapshotElement(Object entry, int i); /** * Called before any elements are read into the collection, * allowing appropriate initializations to occur. * * @param persister The underlying collection persister. * @param anticipatedSize The anticipated size of the collection after initialization is complete. */ public void beforeInitialize(CollectionPersister persister, int anticipatedSize); /** * Does the current state exactly match the snapshot? * * @param persister The collection persister * * @return {@code true} if the current state and the snapshot state match. * */ public boolean equalsSnapshot(CollectionPersister persister); /** * Is the snapshot empty? * * @param snapshot The snapshot to check * * @return {@code true} if the given snapshot is empty */ public boolean isSnapshotEmpty(Serializable snapshot); <|fim▁hole|> * * @param persister The collection persister * * @return The disassembled state */ public Serializable disassemble(CollectionPersister persister) ; /** * Do we need to completely recreate this collection when it changes? * * @param persister The collection persister * * @return {@code true} if a change requires a recreate. */ public boolean needsRecreate(CollectionPersister persister); /** * Return a new snapshot of the current state of the collection * * @param persister The collection persister * * @return The snapshot */ public Serializable getSnapshot(CollectionPersister persister); /** * To be called internally by the session, forcing immediate initialization. */ public void forceInitialization(); /** * Does the given element/entry exist in the collection? * * @param entry The object to check if it exists as a collection element * @param i Unused * * @return {@code true} if the given entry is a collection element */ public boolean entryExists(Object entry, int i); /** * Do we need to insert this element? * * @param entry The collection element to check * @param i The index (for indexed collections) * @param elemType The type for the element * * @return {@code true} if the element needs inserting */ public boolean needsInserting(Object entry, int i, Type elemType); /** * Do we need to update this element? * * @param entry The collection element to check * @param i The index (for indexed collections) * @param elemType The type for the element * * @return {@code true} if the element needs updating */ public boolean needsUpdating(Object entry, int i, Type elemType); /** * Can each element in the collection be mapped unequivocally to a single row in the database? Generally * bags and sets are the only collections that cannot be. 
* * @return {@code true} if the row for each element is known */ public boolean isRowUpdatePossible(); /** * Get all the elements that need deleting * * @param persister The collection persister * @param indexIsFormula For indexed collections, tells whether the index is a formula (calculated value) mapping * * @return An iterator over the elements to delete */ public Iterator getDeletes(CollectionPersister persister, boolean indexIsFormula); /** * Is this the wrapper for the given collection instance? * * @param collection The collection to check whether this is wrapping it * * @return {@code true} if this is a wrapper around that given collection instance. */ public boolean isWrapper(Object collection); /** * Is this instance initialized? * * @return Was this collection initialized? Or is its data still not (fully) loaded? */ public boolean wasInitialized(); /** * Does this instance have any "queued" operations? * * @return {@code true} indicates there are pending, queued, delayed operations */ public boolean hasQueuedOperations(); /** * Iterator over the "queued" additions * * @return The iterator */ public Iterator queuedAdditionIterator(); /** * Get the "queued" orphans * * @param entityName The name of the entity that makes up the elements * * @return The orphaned elements */ public Collection getQueuedOrphans(String entityName); /** * Get the current collection key value * * @return the current collection key value */ public Serializable getKey(); /** * Get the current role name * * @return the collection role name */ public String getRole(); /** * Is the collection unreferenced? * * @return {@code true} if the collection is no longer referenced by an owner */ public boolean isUnreferenced(); /** * Is the collection dirty? Note that this is only * reliable during the flush cycle, after the * collection elements are dirty checked against * the snapshot. * * @return {@code true} if the collection is dirty */ public boolean isDirty(); /** * Clear the dirty flag, after flushing changes * to the database. */ public void clearDirty(); /** * Get the snapshot cached by the collection instance * * @return The internally stored snapshot state */ public Serializable getStoredSnapshot(); /** * Mark the collection as dirty */ public void dirty(); /** * Called before inserting rows, to ensure that any surrogate keys * are fully generated * * @param persister The collection persister */ public void preInsert(CollectionPersister persister); /** * Called after inserting a row, to fetch the natively generated id * * @param persister The collection persister * @param entry The collection element just inserted * @param i The element position/index */ public void afterRowInsert(CollectionPersister persister, Object entry, int i); /** * get all "orphaned" elements * * @param snapshot The snapshot state * @param entityName The name of the entity that are the elements of the collection * * @return The orphans */ public Collection getOrphans(Serializable snapshot, String entityName); }<|fim▁end|>
/** * Disassemble the collection to get it ready for the cache
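The dirty-checking contract described in the javadoc above (clone element state when the collection is initialized, compare at flush time) can be sketched as follows; this is an illustrative Python model with hypothetical names, not Hibernate's actual implementation:

# Illustrative model of snapshot-based dirty checking; names are hypothetical.
import copy

class TrackedCollection:
    def __init__(self, items):
        self.items = list(items)
        self.snapshot = copy.deepcopy(self.items)  # cloned at initialization

    def equals_snapshot(self):
        return self.items == self.snapshot

    def flush(self):
        dirty = not self.equals_snapshot()
        self.snapshot = copy.deepcopy(self.items)  # re-init snapshot after flush
        return dirty

c = TrackedCollection([1, 2])
c.items.append(3)
assert c.flush() is True   # change detected by comparing against the snapshot
assert c.flush() is False  # collection is clean again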
<|file_name|>exercise3.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python

"""
Assignment 1, Exercise 3, INF1340, Fall, 2015. Troubleshooting Car Issues.

This module contains one function diagnose_car(). It is an expert system to
interactively diagnose car issues.
"""

__author__ = 'Susan Sim'
__email__ = "[email protected]"
__copyright__ = "2015 Susan Sim"
__license__ = "MIT License"

"""
Interactively queries the user with yes/no questions to identify a possible
issue with a car.

Inputs: Answers of 'Y' or 'N' typed at each prompt.
Expected Outputs: Advice that follows the decision logic of the question tree.
Errors: Originally did not proceed according to the decision logic because the
conditionals were not nested (same indentation all the way through); fixed by
nesting them properly.
"""


def diagnose_car():
    silent = raw_input("Is the car silent when you turn the key? ")
    # this begins the line of questions on the left side of the question tree
    if silent == 'Y':
        corroded = raw_input("Are the battery terminals corroded?")
        if corroded == 'Y':
            print "Clean terminals and try starting again."
        elif corroded == 'N':
            print "Replace cables and try again."
    elif silent == 'N':
        # this begins the line of questions on the right side of the question tree
        clicking = raw_input("Does the car make a clicking noise?")<|fim▁hole|>
        if clicking == 'Y':
            print "Replace the battery."
        elif clicking == 'N':
            crank = raw_input("Does the car crank up but fail to start?")
            if crank == 'Y':
                print "Check spark plug connections."
            elif crank == 'N':
                start_and_die = raw_input("Does the engine start and then die?")
                if start_and_die == 'Y':
                    fuel_injection = raw_input("Does your car have fuel injection?")
                    if fuel_injection == 'N':
                        print "Check to ensure the choke is opening and closing."
                    elif fuel_injection == 'Y':
                        print "Get it in for service."
                elif start_and_die == 'N':
                    print "Engine is not getting enough fuel. Clean fuel pump."

diagnose_car()<|fim▁end|>
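The nested if/elif chain above hard-codes the question tree; an equivalent data-driven formulation, sketched here in Python 3 for illustration (the assignment code itself is Python 2), keeps the same decision logic in a dictionary:

# Sketch of the same expert system as a data-driven tree (Python 3).
# A node is either a dict with a question and 'Y'/'N' branches, or advice text.
TREE = {
    "q": "Is the car silent when you turn the key? ",
    "Y": {"q": "Are the battery terminals corroded?",
          "Y": "Clean terminals and try starting again.",
          "N": "Replace cables and try again."},
    "N": {"q": "Does the car make a clicking noise?",
          "Y": "Replace the battery.",
          "N": "Continue with the crank/start-and-die questions."},
}

def diagnose(node):
    while isinstance(node, dict):
        node = node[input(node["q"]).strip().upper()]
    print(node)

# diagnose(TREE) would run the interactive session.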
<|file_name|>nn_ops.cc<|end_file_name|><|fim▁begin|>/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/util/mirror_pad_mode.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/tensor_format.h" namespace tensorflow { using shape_inference::DimensionHandle; using shape_inference::InferenceContext; using shape_inference::ShapeHandle; namespace { // A shape function that uses the tensor value at <input_idx> as a shape for // output 0. If the tensor value is not available, it uses a shape with <ndims> // unknown dims. Status InputTensorShapeOrUnknown(InferenceContext* c, int input_idx, int ndims) { ShapeHandle out; const Tensor* input = c->input_tensor(input_idx); if (input == nullptr) { out = c->UnknownShapeOfRank(ndims); } else { TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(input_idx, &out)); } c->set_output(0, out); return Status::OK(); } Status FractionalPoolShapeFn(InferenceContext* c) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input)); std::vector<float> pooling_ratio; TF_RETURN_IF_ERROR(c->GetAttr("pooling_ratio", &pooling_ratio)); if (pooling_ratio.size() != 4) { return errors::InvalidArgument( "pooling_ratio field must specify 4 dimensions"); } std::vector<DimensionHandle> output_dims; for (int i = 0; i < 4; ++i) { DimensionHandle d = c->Dim(input, i); if (c->ValueKnown(d)) { // This must match the same logic in the kernel function in // core/kernels/fractional_max_pool_op.cc. auto val = static_cast<int64>(floor(c->Value(d) / pooling_ratio[i])); if (val < 0) { return errors::InvalidArgument("Size computed for dim ", i, " is negative: ", val); } output_dims.push_back(c->MakeDim(val)); } else { output_dims.push_back(c->UnknownDim()); } } c->set_output(0, c->MakeShape(output_dims)); c->set_output(1, c->Vector(output_dims[1])); c->set_output(2, c->Vector(output_dims[2])); return Status::OK(); } } // namespace // -------------------------------------------------------------------------- REGISTER_OP("AvgPool") .Input("value: T") .Output("output: T") .Attr("ksize: list(int) >= 4") .Attr("strides: list(int) >= 4") .Attr(GetPaddingAttrString()) .Attr(GetConvnetDataFormatAttrString()) .Attr("T: realnumbertype") .SetShapeFn(shape_inference::AvgPoolShape) .Doc(R"doc( Performs average pooling on the input. Each entry in `output` is the mean of the corresponding size `ksize` window in `value`. value: 4-D with shape `[batch, height, width, channels]`. ksize: The size of the sliding window for each dimension of `value`. strides: The stride of the sliding window for each dimension of `value`. padding: The type of padding algorithm to use. data_format: Specify the data format of the input and output data. 
With the default format "NHWC", the data is stored in the order of: [batch, in_height, in_width, in_channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, in_channels, in_height, in_width]. output: The average pooled output tensor. )doc"); REGISTER_OP("AvgPoolGrad") .Input("orig_input_shape: int32") .Input("grad: T") .Output("output: T") .Attr("ksize: list(int) >= 4") .Attr("strides: list(int) >= 4") .Attr(GetPaddingAttrString()) .Attr(GetConvnetDataFormatAttrString()) .Attr("T: realnumbertype") .SetShapeFn([](InferenceContext* c) { // NOTE(mrry): We could in principle work out the shape from the // gradients and the attrs, but if we do not know orig_input_shape // statically, then we are unlikely to know the shape of the // gradients either. return InputTensorShapeOrUnknown(c, 0 /* input_idx */, 4 /* ndims */); }) .Doc(R"doc( Computes gradients of the average pooling function. orig_input_shape: 1-D. Shape of the original input to `avg_pool`. grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the output of `avg_pool`. ksize: The size of the sliding window for each dimension of the input. strides: The stride of the sliding window for each dimension of the input. padding: The type of padding algorithm to use. data_format: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of: [batch, in_height, in_width, in_channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, in_channels, in_height, in_width]. output: 4-D. Gradients w.r.t. the input of `avg_pool`. )doc"); // -------------------------------------------------------------------------- REGISTER_OP("BatchNormWithGlobalNormalization") .Input("t: T") .Input("m: T") .Input("v: T") .Input("beta: T") .Input("gamma: T") .Output("result: T") .Attr("T: numbertype") .Attr("variance_epsilon: float") .Attr("scale_after_normalization: bool") .Deprecated(9, "Use tf.nn.batch_normalization()") .SetShapeFn([](InferenceContext* c) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input)); DimensionHandle last_dim = c->Dim(input, 3); for (int i = 1; i < 5; ++i) { // covers m, v, beta, gamma ShapeHandle vec; TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &vec)); TF_RETURN_IF_ERROR(c->Merge(last_dim, c->Dim(vec, 0), &last_dim)); } ShapeHandle out; TF_RETURN_IF_ERROR(c->ReplaceDim(input, 3, last_dim, &out)); c->set_output(0, out); return Status::OK(); }) .Doc(R"doc( Batch normalization. This op is deprecated. Prefer `tf.nn.batch_normalization`. t: A 4D input Tensor. m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof. v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof. beta: A 1D beta Tensor with size matching the last dimension of t. An offset to be added to the normalized tensor. gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this tensor will be multiplied with the normalized tensor. variance_epsilon: A small float number to avoid dividing by 0. scale_after_normalization: A bool indicating whether the resulted tensor needs to be multiplied with gamma. 
)doc"); REGISTER_OP("BatchNormWithGlobalNormalizationGrad") .Input("t: T") .Input("m: T") .Input("v: T") .Input("gamma: T") .Input("backprop: T") .Output("dx: T") .Output("dm: T") .Output("dv: T") .Output("db: T") .Output("dg: T") .Attr("T: numbertype") .Attr("variance_epsilon: float") .Attr("scale_after_normalization: bool") .Deprecated(9, "Use tf.nn.batch_normalization()") .SetShapeFn([](InferenceContext* c) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input)); TF_RETURN_IF_ERROR( c->Merge(input, c->input(4), &input)); // with backprop DimensionHandle last_dim = c->Dim(input, 3); for (int i = 1; i < 4; ++i) { // covers m, v, gamma ShapeHandle vec; TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &vec)); TF_RETURN_IF_ERROR(c->Merge(last_dim, c->Dim(vec, 0), &last_dim)); } ShapeHandle dx; TF_RETURN_IF_ERROR(c->ReplaceDim(input, 3, last_dim, &dx)); c->set_output(0, dx); ShapeHandle vector_shape = c->Vector(last_dim); c->set_output(1, vector_shape); c->set_output(2, vector_shape); c->set_output(3, vector_shape); c->set_output(4, vector_shape); return Status::OK(); }) .Doc(R"doc( Gradients for batch normalization. This op is deprecated. See `tf.nn.batch_normalization`. t: A 4D input Tensor. m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof. v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof. gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this Tensor will be multiplied with the normalized Tensor. backprop: 4D backprop Tensor. variance_epsilon: A small float number to avoid dividing by 0. scale_after_normalization: A bool indicating whether the resulted tensor needs to be multiplied with gamma. dx: 4D backprop tensor for input. dm: 1D backprop tensor for mean. dv: 1D backprop tensor for variance. db: 1D backprop tensor for beta. dg: 1D backprop tensor for gamma. )doc"); // -------------------------------------------------------------------------- REGISTER_OP("FusedBatchNorm") .Input("x: T") .Input("scale: T") .Input("offset: T") .Input("mean: T") .Input("variance: T") .Output("y: T") .Output("batch_mean: T") .Output("batch_variance: T") .Output("reserve_space_1: T") .Output("reserve_space_2: T") .Attr("T: numbertype") .Attr("epsilon: float = 0.0001") .Attr("data_format: string = 'NHWC'") .Attr("is_training: bool = true") .SetShapeFn([](InferenceContext* c) { ShapeHandle x; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &x)); bool is_training; TF_RETURN_IF_ERROR(c->GetAttr("is_training", &is_training)); int number_inputs = (is_training) ? 3 : 5; string data_format; TF_RETURN_IF_ERROR(c->GetAttr("data_format", &data_format)); DimensionHandle channel_dim = (data_format == "NHWC") ? 
c->Dim(x, 3) : c->Dim(x, 1); // covers scale, offset, and if is_training is false, mean, variance for (int i = 1; i < number_inputs; ++i) { ShapeHandle vec; TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &vec)); TF_RETURN_IF_ERROR(c->Merge(channel_dim, c->Dim(vec, 0), &channel_dim)); } ShapeHandle y; if (data_format == "NHWC") { TF_RETURN_IF_ERROR(c->ReplaceDim(x, 3, channel_dim, &y)); } else { TF_RETURN_IF_ERROR(c->ReplaceDim(x, 1, channel_dim, &y)); } c->set_output(0, y); ShapeHandle vector_shape = c->Vector(channel_dim); c->set_output(1, vector_shape); c->set_output(2, vector_shape); c->set_output(3, vector_shape); c->set_output(4, vector_shape); return Status::OK(); }) .Doc(R"doc( Batch normalization. Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". The size of 1D Tensors matches the dimension C of the 4D Tensors. x: A 4D Tensor for input data. scale: A 1D Tensor for scaling factor, to scale the normalized x. offset: A 1D Tensor for offset, to shift to the normalized x. mean: A 1D Tensor for population mean. Used for inference only; must be empty for training. variance: A 1D Tensor for population variance. Used for inference only; must be empty for training. y: A 4D Tensor for output data. batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow to compute the running mean. batch_variance: A 1D Tensor for the computed batch variance, to be used by TensorFlow to compute the running variance. reserve_space_1: A 1D Tensor for the computed batch mean, to be reused in the gradient computation. reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance in the cuDNN case), to be used in the gradient computation. T: The data type for the elements of input and output Tensors. epsilon: A small float number added to the variance of x. data_format: The data format for x and y. Either "NHWC" (default) or "NCHW". is_training: A bool value to indicate the operation is for training (default) or inference. )doc"); REGISTER_OP("FusedBatchNormGrad") .Input("y_backprop: T") .Input("x: T") .Input("scale: T") .Input("reserve_space_1: T") .Input("reserve_space_2: T") .Output("x_backprop: T") .Output("scale_backprop: T") .Output("offset_backprop: T") .Output("reserve_space_3: T") .Output("reserve_space_4: T") .Attr("T: numbertype") .Attr("epsilon: float = 0.0001") .Attr("data_format: string = 'NHWC'") .Attr("is_training: bool = true") .SetShapeFn([](InferenceContext* c) { ShapeHandle y_backprop; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &y_backprop)); ShapeHandle x; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 4, &x)); bool is_training; string data_format; TF_RETURN_IF_ERROR(c->GetAttr("is_training", &is_training)); TF_RETURN_IF_ERROR(c->GetAttr("data_format", &data_format)); DimensionHandle channel_dim = (data_format == "NHWC") ? 
c->Dim(y_backprop, 3) : c->Dim(y_backprop, 1); if (data_format == "NHWC") { TF_RETURN_IF_ERROR(c->Merge(channel_dim, c->Dim(x, 3), &channel_dim)); } else { TF_RETURN_IF_ERROR(c->Merge(channel_dim, c->Dim(x, 1), &channel_dim)); } // covers scale, mean (reserve_space_1), variance (reserve_space_2) for (int i = 2; i < 5; ++i) { ShapeHandle vec; TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &vec)); TF_RETURN_IF_ERROR(c->Merge(channel_dim, c->Dim(vec, 0), &channel_dim)); } ShapeHandle x_backprop; if (data_format == "NHWC") { TF_RETURN_IF_ERROR( c->ReplaceDim(y_backprop, 3, channel_dim, &x_backprop)); } else { TF_RETURN_IF_ERROR( c->ReplaceDim(y_backprop, 1, channel_dim, &x_backprop)); } c->set_output(0, x_backprop); c->set_output(1, c->Vector(channel_dim)); c->set_output(2, c->Vector(channel_dim)); // Set the correct shapes for reserve_spaces // so that gradients can be performed when // the op is in a symbolic condition. if (is_training) { c->set_output(3, c->Vector(0)); c->set_output(4, c->Vector(0)); } else { c->set_output(3, c->Vector(channel_dim)); c->set_output(4, c->Vector(channel_dim)); } return Status::OK(); }) .Doc(R"doc( Gradient for batch normalization. Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". The size of 1D Tensors matches the dimension C of the 4D Tensors. y_backprop: A 4D Tensor for the gradient with respect to y. x: A 4D Tensor for input data. scale: A 1D Tensor for scaling factor, to scale the normalized x. reserve_space_1: A 1D Tensor for the computed batch mean, to be reused in the gradient computation. reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance in the cuDNN case), to be used in the gradient computation. x_backprop: A 4D Tensor for the gradient with respect to x. scale_backprop: A 1D Tensor for the gradient with respect to scale. offset_backprop: A 1D Tensor for the gradient with respect to offset. reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm. reserve_space_4: Unused placeholder to match the variance input in FusedBatchNorm. T: The data type for the elements of input and output Tensors. epsilon: A small float number added to the variance of x. data_format: The data format for y_backprop, x, x_backprop. Either "NHWC" (default) or "NCHW". is_training: A bool value to indicate the operation is for training (default) or inference. )doc"); // -------------------------------------------------------------------------- REGISTER_OP("BiasAdd") .Attr("T: numbertype") .Input("value: T") .Input("bias: T") .Attr(GetConvnetDataFormatAttrString()) .Output("output: T") .SetShapeFn(shape_inference::BiasAddShape) .Doc(R"doc( Adds `bias` to `value`. This is a special case of `tf.add` where `bias` is restricted to be 1-D. Broadcasting is supported, so `value` may have any number of dimensions. value: Any number of dimensions. bias: 1-D with size the last dimension of `value`. data_format: Specify the data format of the input and output data. With the default format "NHWC", the bias tensor will be added to the last dimension of the value tensor. Alternatively, the format could be "NCHW", the data storage order of: [batch, in_channels, in_height, in_width]. The tensor will be added to "in_channels", the third-to-the-last dimension. output: Broadcasted sum of `value` and `bias`. 
)doc"); // -------------------------------------------------------------------------- REGISTER_OP("BiasAddGrad") .Attr("T: numbertype") .Input("out_backprop: T") .Attr(GetConvnetDataFormatAttrString()) .Output("output: T") .SetShapeFn(shape_inference::BiasAddGradShape) .Doc(R"doc( The backward operation for "BiasAdd" on the "bias" tensor. It accumulates all the values from out_backprop into the feature dimension. For NHWC data format, the feature dimension is the last. For NCHW data format, the feature dimension is the third-to-last. out_backprop: Any number of dimensions. output: 1-D with size the feature dimension of `out_backprop`. data_format: Specify the data format of the input and output data. With the default format "NHWC", the bias tensor will be added to the last dimension of the value tensor. Alternatively, the format could be "NCHW", the data storage order of: [batch, in_channels, in_height, in_width]. The tensor will be added to "in_channels", the third-to-the-last dimension. )doc"); // -------------------------------------------------------------------------- REGISTER_OP("BiasAddV1") .Attr("T: numbertype") .Input("value: T") .Input("bias: T") .Output("output: T") .SetShapeFn(shape_inference::BiasAddShape) .Doc(R"doc( Adds `bias` to `value`. This is a deprecated version of BiasAdd and will be soon removed. This is a special case of `tf.add` where `bias` is restricted to be 1-D. Broadcasting is supported, so `value` may have any number of dimensions. value: Any number of dimensions. bias: 1-D with size the last dimension of `value`. output: Broadcasted sum of `value` and `bias`. )doc"); // -------------------------------------------------------------------------- REGISTER_OP("Conv2D") .Input("input: T") .Input("filter: T") .Output("output: T") .Attr("T: {half, float, double}") .Attr("strides: list(int)") .Attr("use_cudnn_on_gpu: bool = true") .Attr(GetPaddingAttrString()) .Attr(GetConvnetDataFormatAttrString()) .SetShapeFn(shape_inference::Conv2DShape) .Doc(R"doc( Computes a 2-D convolution given 4-D `input` and `filter` tensors. Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, out_channels]`, this op performs the following: 1. Flattens the filter to a 2-D matrix with shape `[filter_height * filter_width * in_channels, output_channels]`. 2. Extracts image patches from the input tensor to form a *virtual* tensor of shape `[batch, out_height, out_width, filter_height * filter_width * in_channels]`. 3. For each patch, right-multiplies the filter matrix and the image patch vector. In detail, with the default NHWC format, output[b, i, j, k] = sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * filter[di, dj, q, k] Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertices strides, `strides = [1, stride, stride, 1]`. input: A 4-D tensor. The dimension order is interpreted according to the value of `data_format`, see below for details. filter: A 4-D tensor of shape `[filter_height, filter_width, in_channels, out_channels]` output: A 4-D tensor. The dimension order is determined by the value of `data_format`, see below for details. strides: 1-D tensor of length 4. The stride of the sliding window for each dimension of `input`. The dimension order is determined by the value of `data_format`, see below for details. padding: The type of padding algorithm to use. 
data_format: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of: [batch, height, width, channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, channels, height, width]. )doc"); REGISTER_OP("Conv2DBackpropInput") .Input("input_sizes: int32") .Input("filter: T") .Input("out_backprop: T") .Output("output: T") .Attr("T: {half, float, double}") .Attr("strides: list(int)") .Attr("use_cudnn_on_gpu: bool = true") .Attr(GetPaddingAttrString()) .Attr(GetConvnetDataFormatAttrString()) .SetShapeFn([](InferenceContext* c) { // NOTE(mrry): We could in principle work out the shape from the // gradients and the attrs, but if we do not know orig_input_shape // statically, then we are unlikely to know the shape of the // gradients either. return InputTensorShapeOrUnknown(c, 0 /* input_idx */, 4 /* ndims */); }) .Doc(R"doc( Computes the gradients of convolution with respect to the input. input_sizes: An integer vector representing the shape of `input`, where `input` is a 4-D `[batch, height, width, channels]` tensor. filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. strides: The stride of the sliding window for each dimension of the input of the convolution. Must be in the same order as the dimension specified with format. padding: The type of padding algorithm to use. output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient w.r.t. the input of the convolution. data_format: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of: [batch, in_height, in_width, in_channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, in_channels, in_height, in_width]. )doc"); // TODO(jeff): Instead of 'use_cudnn_for_gpu', maybe we should have a // more general string attribute ('kernel_impl'?) that can be used to // select among several possible implementations. REGISTER_OP("Conv2DBackpropFilter") .Input("input: T") .Input("filter_sizes: int32") .Input("out_backprop: T") .Output("output: T") .Attr("T: {half, float, double}") .Attr("strides: list(int)") .Attr("use_cudnn_on_gpu: bool = true") .Attr(GetPaddingAttrString()) .Attr(GetConvnetDataFormatAttrString()) .SetShapeFn([](InferenceContext* c) { // NOTE(mrry): We could in principle work out the shape from the // gradients and the attrs, but if we do not know orig_input_shape // statically, then we are unlikely to know the shape of the // gradients either. return InputTensorShapeOrUnknown(c, 1 /* input_idx */, 4 /* ndims */); }) .Doc(R"doc( Computes the gradients of convolution with respect to the filter. input: 4-D with shape `[batch, in_height, in_width, in_channels]`. filter_sizes: An integer vector representing the tensor shape of `filter`, where `filter` is a 4-D `[filter_height, filter_width, in_channels, out_channels]` tensor. out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. strides: The stride of the sliding window for each dimension of the input of the convolution. Must be in the same order as the dimension specified with format. padding: The type of padding algorithm to use. output: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. the `filter` input of the convolution. 
data_format: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of: [batch, in_height, in_width, in_channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, in_channels, in_height, in_width]. )doc"); namespace { Status CommonFusedConvCalculations(InferenceContext* c, bool has_resize) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input)); ShapeHandle resized = input; int paddings_index = 1; int filter_index = 2; if (has_resize) { paddings_index = 2; filter_index = 3; ShapeHandle unused_size; TF_RETURN_IF_ERROR(c->Merge(c->input(1), c->Vector(2), &unused_size)); const Tensor* size = c->input_tensor(1); DimensionHandle new_height = c->UnknownDim(); DimensionHandle new_width = c->UnknownDim(); if (size != nullptr) { new_height = c->MakeDim(size->flat<int32>()(0)); new_width = c->MakeDim(size->flat<int32>()(1)); } TF_RETURN_IF_ERROR(c->ReplaceDim(resized, 1, new_height, &resized)); TF_RETURN_IF_ERROR(c->ReplaceDim(resized, 2, new_width, &resized)); } ShapeHandle paddings; TF_RETURN_IF_ERROR(c->WithRank(c->input(paddings_index), 2, &paddings)); TF_RETURN_IF_ERROR( c->WithRank(resized, c->Value(c->Dim(paddings, 0)), &resized)); TF_RETURN_IF_ERROR( c->Merge(paddings, c->Matrix(c->Rank(resized), 2), &paddings)); const Tensor* paddings_t = c->input_tensor(paddings_index); ShapeHandle padded; if (paddings_t != nullptr) { std::vector<DimensionHandle> output_dims; for (int i = 0; i < 4; ++i) { DimensionHandle dim = c->Dim(resized, i); int64 p0 = static_cast<int64>(paddings_t->matrix<int32>()(i, 0)); int64 p1 = static_cast<int64>(paddings_t->matrix<int32>()(i, 1)); if (p0 < 0 || p1 < 0) { return errors::InvalidArgument("Paddings must be non-negative"); } TF_RETURN_IF_ERROR(c->Add(dim, p0 + p1, &dim)); output_dims.push_back(dim); } padded = c->MakeShape(output_dims); } else { padded = c->UnknownShapeOfRank(4); } // Work out the convolution's effect with 'padded' as the input. 
ShapeHandle filter; TF_RETURN_IF_ERROR(c->WithRank(c->input(filter_index), 4, &filter)); std::vector<int32> strides; TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides)); if (strides.size() != 4) { return errors::InvalidArgument( "Operation requires the stride attribute to contain 4 values, but ", "got: ", strides.size()); } int32 stride_rows = strides[1]; int32 stride_cols = strides[2]; DimensionHandle batch_size_dim = c->Dim(padded, 0); DimensionHandle in_rows_dim = c->Dim(padded, 1); DimensionHandle in_cols_dim = c->Dim(padded, 2); DimensionHandle filter_rows_dim = c->Dim(filter, 0); DimensionHandle filter_cols_dim = c->Dim(filter, 1); DimensionHandle output_depth_dim = c->Dim(filter, 3); DimensionHandle unused; TF_RETURN_IF_ERROR(c->Merge(c->Dim(padded, 3), c->Dim(filter, 2), &unused)); Padding padding; TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding)); DimensionHandle output_rows, output_cols; TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims( c, in_rows_dim, filter_rows_dim, stride_rows, padding, &output_rows)); TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims( c, in_cols_dim, filter_cols_dim, stride_cols, padding, &output_cols)); ShapeHandle output_shape = c->MakeShape( {batch_size_dim, output_rows, output_cols, output_depth_dim}); c->set_output(0, output_shape); return Status::OK(); } } // namespace REGISTER_OP("FusedResizeAndPadConv2D") .Input("input: T") .Input("size: int32") .Input("paddings: int32") .Input("filter: T") .Output("output: T") .Attr("T: {half, float, double}") .Attr("resize_align_corners: bool = false") .Attr(GetMirrorPadModeAttrString()) .Attr("strides: list(int)") .Attr(GetPaddingAttrString()) .SetShapeFn([](InferenceContext* c) { return CommonFusedConvCalculations(c, true /* has_resize */); }) .Doc(R"doc( Performs a resize and padding as a preprocess during a convolution. It's often possible to do spatial transformations more efficiently as part of the packing stage of a convolution, so this op allows for an optimized implementation where these stages are fused together. This prevents the need to write out the intermediate results as whole tensors, reducing memory pressure, and we can get some latency gains by merging the transformation calculations. The data_format attribute for Conv2D isn't supported by this op, and defaults to 'NHWC' order. Internally this op uses a single per-graph scratch buffer, which means that it will block if multiple versions are being run in parallel. This is because this operator is primarily an optimization to minimize memory usage. input: 4-D with shape `[batch, in_height, in_width, in_channels]`. size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. paddings: A two-column matrix specifying the padding sizes. The number of rows must be the same as the rank of `input`. filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. resize_align_corners: If true, rescale input by (new_height - 1) / (height - 1), which exactly aligns the 4 corners of images and resized images. If false, rescale by new_height / height. Treat similarly the width dimension. strides: 1-D of length 4. The stride of the sliding window for each dimension of `input`. Must be in the same order as the dimension specified with format. padding: The type of padding algorithm to use. 
)doc"); REGISTER_OP("FusedPadConv2D") .Input("input: T") .Input("paddings: int32") .Input("filter: T") .Output("output: T") .Attr("T: {half, float, double}") .Attr(GetMirrorPadModeAttrString()) .Attr("strides: list(int)") .Attr(GetPaddingAttrString()) .SetShapeFn([](InferenceContext* c) { return CommonFusedConvCalculations(c, false /* has_resize */); }) .Doc(R"doc( Performs a padding as a preprocess during a convolution. Similar to FusedResizeAndPadConv2d, this op allows for an optimized implementation where the spatial padding transformation stage is fused with the im2col lookup, but in this case without the bilinear filtering required for resizing. Fusing the padding prevents the need to write out the intermediate results as whole tensors, reducing memory pressure, and we can get some latency gains by merging the transformation calculations. The data_format attribute for Conv2D isn't supported by this op, and 'NHWC' order is used instead. Internally this op uses a single per-graph scratch buffer, which means that it will block if multiple versions are being run in parallel. This is because this operator is primarily an optimization to minimize memory usage. input: 4-D with shape `[batch, in_height, in_width, in_channels]`. paddings: A two-column matrix specifying the padding sizes. The number of rows must be the same as the rank of `input`. filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. strides: 1-D of length 4. The stride of the sliding window for each dimension of `input`. Must be in the same order as the dimension specified with format. padding: The type of padding algorithm to use. )doc"); // -------------------------------------------------------------------------- REGISTER_OP("DepthwiseConv2dNative") .Input("input: T") .Input("filter: T") .Output("output: T") .Attr("T: {float, double}") .Attr("strides: list(int)") .Attr(GetPaddingAttrString()) .Attr(GetConvnetDataFormatAttrString()) .SetShapeFn(shape_inference::DepthwiseConv2DNativeShape) .Doc(R"doc( Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors. Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, channel_multiplier]`, containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies a different filter to each input channel (expanding from 1 channel to `channel_multiplier` channels for each), then concatenates the results together. Thus, the output has `in_channels * channel_multiplier` channels. for k in 0..in_channels-1 for q in 0..channel_multiplier-1 output[b, i, j, k * channel_multiplier + q] = sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] * filter[di, dj, k, q] Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertices strides, `strides = [1, stride, stride, 1]`. strides: 1-D of length 4. The stride of the sliding window for each dimension of `input`. padding: The type of padding algorithm to use. data_format: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of: [batch, height, width, channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, channels, height, width]. 
)doc"); REGISTER_OP("DepthwiseConv2dNativeBackpropInput") .Input("input_sizes: int32") .Input("filter: T") .Input("out_backprop: T") .Output("output: T") .Attr("T: {float, double}") .Attr("strides: list(int)") .Attr(GetPaddingAttrString()) .Attr(GetConvnetDataFormatAttrString()) .SetShapeFn([](InferenceContext* c) { // NOTE(mrry): We could in principle work out the shape from the // gradients and the attrs, but if we do not know orig_input_shape // statically, then we are unlikely to know the shape of the // gradients either. return InputTensorShapeOrUnknown(c, 0 /* input_idx */, 4 /* ndims */); }) .Doc(R"doc( Computes the gradients of depthwise convolution with respect to the input. input_sizes: An integer vector representing the shape of `input`, based on `data_format`. For example, if `data_format` is 'NHWC' then `input` is a 4-D `[batch, height, width, channels]` tensor. filter: 4-D with shape `[filter_height, filter_width, in_channels, depthwise_multiplier]`. out_backprop: 4-D with shape based on `data_format`. For example, if `data_format` is 'NHWC' then out_backprop shape is `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. strides: The stride of the sliding window for each dimension of the input of the convolution. padding: The type of padding algorithm to use. data_format: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of: [batch, height, width, channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, channels, height, width]. output: 4-D with shape according to `data_format`. For example, if `data_format` is 'NHWC', output shape is `[batch, in_height, in_width, in_channels]`. Gradient w.r.t. the input of the convolution. )doc"); REGISTER_OP("DepthwiseConv2dNativeBackpropFilter") .Input("input: T") .Input("filter_sizes: int32") .Input("out_backprop: T") .Output("output: T") .Attr("T: {float, double}") .Attr("strides: list(int)") .Attr(GetPaddingAttrString()) .Attr(GetConvnetDataFormatAttrString()) .SetShapeFn([](InferenceContext* c) { // NOTE(mrry): We could in principle work out the shape from the // gradients and the attrs, but if we do not know orig_input_shape // statically, then we are unlikely to know the shape of the // gradients either. return InputTensorShapeOrUnknown(c, 1 /* input_idx */, 4 /* ndims */); }) .Doc(R"doc( Computes the gradients of depthwise convolution with respect to the filter. input: 4-D with shape based on `data_format`. For example, if `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height, in_width, in_channels]` tensor. filter_sizes: An integer vector representing the tensor shape of `filter`, where `filter` is a 4-D `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor. out_backprop: 4-D with shape based on `data_format`. For example, if `data_format` is 'NHWC' then out_backprop shape is `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. strides: The stride of the sliding window for each dimension of the input of the convolution. padding: The type of padding algorithm to use. data_format: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of: [batch, height, width, channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, channels, height, width]. output: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. 
Gradient w.r.t. the `filter` input of the convolution. )doc"); // -------------------------------------------------------------------------- REGISTER_OP("Conv3D") .Input("input: T") .Input("filter: T") .Output("output: T") .Attr("T: numbertype") .Attr("strides: list(int) >= 5") .Attr(GetPaddingAttrString()) .Attr(GetConvnet3dDataFormatAttrString()) .SetShapeFn(shape_inference::Conv3DShape) .Doc(R"doc( Computes a 3-D convolution given 5-D `input` and `filter` tensors. In signal processing, cross-correlation is a measure of similarity of two waveforms as a function of a time-lag applied to one of them. This is also known as a sliding dot product or sliding inner-product. Our Conv3D implements a form of cross-correlation. input: Shape `[batch, in_depth, in_height, in_width, in_channels]`. filter: Shape `[filter_depth, filter_height, filter_width, in_channels, out_channels]`. `in_channels` must match between `input` and `filter`. strides: 1-D tensor of length 5. The stride of the sliding window for each dimension of `input`. Must have `strides[0] = strides[4] = 1`. padding: The type of padding algorithm to use. data_format: The data format of the input and output data. With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. )doc"); REGISTER_OP("Conv3DBackpropInput") .Input("input: T") .Input("filter: T") .Input("out_backprop: T") .Output("output: T") .Attr("T: numbertype") .Attr("strides: list(int) >= 5") .Attr(GetPaddingAttrString()) .Deprecated(10, "Use Conv3DBackpropInputV2") .SetShapeFn([](InferenceContext* c) { return UnchangedShapeWithRank(c, 5); }) .Doc(R"doc( Computes the gradients of 3-D convolution with respect to the input. input: Shape `[batch, depth, rows, cols, in_channels]`. filter: Shape `[depth, rows, cols, in_channels, out_channels]`. `in_channels` must match between `input` and `filter`. out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`. strides: 1-D tensor of length 5. The stride of the sliding window for each dimension of `input`. Must have `strides[0] = strides[4] = 1`. padding: The type of padding algorithm to use. )doc"); REGISTER_OP("Conv3DBackpropFilter") .Input("input: T") .Input("filter: T") .Input("out_backprop: T") .Output("output: T") .Attr("T: numbertype") .Attr("strides: list(int) >= 5") .Attr(GetPaddingAttrString()) .Deprecated(10, "Use Conv3DBackpropFilterV2") .SetShapeFn([](InferenceContext* c) { ShapeHandle out; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 5, &out)); c->set_output(0, out); return Status::OK(); }) .Doc(R"doc( Computes the gradients of 3-D convolution with respect to the filter. input: Shape `[batch, depth, rows, cols, in_channels]`. filter: Shape `[depth, rows, cols, in_channels, out_channels]`. `in_channels` must match between `input` and `filter`. out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`. strides: 1-D tensor of length 5. The stride of the sliding window for each dimension of `input`. Must have `strides[0] = strides[4] = 1`. padding: The type of padding algorithm to use. 
)doc"); REGISTER_OP("Conv3DBackpropInputV2") .Input("input_sizes: int32") .Input("filter: T") .Input("out_backprop: T") .Output("output: T") .Attr("T: numbertype") .Attr("strides: list(int) >= 5") .Attr(GetPaddingAttrString()) .Attr(GetConvnet3dDataFormatAttrString()) .SetShapeFn([](InferenceContext* c) { ShapeHandle s; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &s)); TF_RETURN_IF_ERROR(c->WithRank(s, 5, &s)); c->set_output(0, s); return Status::OK(); }) .Doc(R"doc( Computes the gradients of 3-D convolution with respect to the input. input_sizes: An integer vector representing the tensor shape of `input`, where `input` is a 5-D `[batch, depth, rows, cols, in_channels]` tensor. filter: Shape `[depth, rows, cols, in_channels, out_channels]`. `in_channels` must match between `input` and `filter`. out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`. strides: 1-D tensor of length 5. The stride of the sliding window for each dimension of `input`. Must have `strides[0] = strides[4] = 1`. padding: The type of padding algorithm to use. data_format: The data format of the input and output data. With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. )doc"); REGISTER_OP("Conv3DBackpropFilterV2") .Input("input: T") .Input("filter_sizes: int32") .Input("out_backprop: T") .Output("output: T") .Attr("T: numbertype") .Attr("strides: list(int) >= 5") .Attr(GetPaddingAttrString()) .Attr(GetConvnet3dDataFormatAttrString()) .SetShapeFn([](InferenceContext* c) { ShapeHandle s; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(1, &s)); TF_RETURN_IF_ERROR(c->WithRank(s, 5, &s)); c->set_output(0, s); return Status::OK(); }) .Doc(R"doc( Computes the gradients of 3-D convolution with respect to the filter. input: Shape `[batch, depth, rows, cols, in_channels]`. filter_sizes: An integer vector representing the tensor shape of `filter`, where `filter` is a 5-D `[filter_depth, filter_height, filter_width, in_channels, out_channels]` tensor. out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`. strides: 1-D tensor of length 5. The stride of the sliding window for each dimension of `input`. Must have `strides[0] = strides[4] = 1`. padding: The type of padding algorithm to use. data_format: The data format of the input and output data. With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. )doc"); // -------------------------------------------------------------------------- REGISTER_OP("AvgPool3D") .Input("input: T") .Output("output: T") .Attr("ksize: list(int) >= 5") .Attr("strides: list(int) >= 5") .Attr(GetPaddingAttrString()) .Attr(GetConvnet3dDataFormatAttrString()) .Attr("T: numbertype") .SetShapeFn(shape_inference::Pool3DShape) .Doc(R"doc( Performs 3D average pooling on the input. ksize: 1-D tensor of length 5. The size of the window for each dimension of the input tensor. Must have `ksize[0] = ksize[4] = 1`. strides: 1-D tensor of length 5. The stride of the sliding window for each dimension of `input`. Must have `strides[0] = strides[4] = 1`. padding: The type of padding algorithm to use. 
input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over. output: The average pooled output tensor. data_format: The data format of the input and output data. With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. )doc"); REGISTER_OP("AvgPool3DGrad") .Input("orig_input_shape: int32") .Input("grad: T") .Output("output: T") .Attr("ksize: list(int) >= 5") .Attr("strides: list(int) >= 5") .Attr(GetPaddingAttrString()) .Attr(GetConvnet3dDataFormatAttrString()) .Attr("T: numbertype") .SetShapeFn([](InferenceContext* c) { ShapeHandle s; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &s)); TF_RETURN_IF_ERROR(c->WithRank(s, 5, &s)); c->set_output(0, s); return Status::OK(); }) .Doc(R"doc( Computes gradients of average pooling function. ksize: 1-D tensor of length 5. The size of the window for each dimension of the input tensor. Must have `ksize[0] = ksize[4] = 1`. strides: 1-D tensor of length 5. The stride of the sliding window for each dimension of `input`. Must have `strides[0] = strides[4] = 1`. padding: The type of padding algorithm to use. orig_input_shape: The original input dimensions. grad: Output backprop of shape `[batch, depth, rows, cols, channels]`. output: The backprop for input. data_format: The data format of the input and output data. With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. )doc"); // -------------------------------------------------------------------------- REGISTER_OP("MaxPool3D") .Input("input: T") .Output("output: T") .Attr("ksize: list(int) >= 5") .Attr("strides: list(int) >= 5") .Attr(GetPaddingAttrString()) .Attr(GetConvnet3dDataFormatAttrString()) .Attr("T: numbertype") .SetShapeFn(shape_inference::Pool3DShape) .Doc(R"doc( Performs 3D max pooling on the input. ksize: 1-D tensor of length 5. The size of the window for each dimension of the input tensor. Must have `ksize[0] = ksize[4] = 1`. strides: 1-D tensor of length 5. The stride of the sliding window for each dimension of `input`. Must have `strides[0] = strides[4] = 1`. padding: The type of padding algorithm to use. input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over. output: The max pooled output tensor. data_format: The data format of the input and output data. With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. )doc"); REGISTER_OP("MaxPool3DGrad") .Input("orig_input: TInput") .Input("orig_output: TInput") .Input("grad: T") .Output("output: T") .Attr("ksize: list(int) >= 5 ") .Attr("strides: list(int) >= 5") .Attr(GetPaddingAttrString()) .Attr(GetConvnet3dDataFormatAttrString()) .Attr("T: numbertype = DT_FLOAT") .Attr("TInput: numbertype = DT_FLOAT") .SetShapeFn([](InferenceContext* c) { return UnchangedShapeWithRank(c, 5); }) .Doc(R"doc( Computes gradients of max pooling function. ksize: 1-D tensor of length 5. The size of the window for each dimension of the input tensor. Must have `ksize[0] = ksize[4] = 1`. strides: 1-D tensor of length 5. 
The stride of the sliding window for each dimension of `input`. Must have `strides[0] = strides[4] = 1`. padding: The type of padding algorithm to use. orig_input: The original input tensor. orig_output: The original output tensor. grad: Output backprop of shape `[batch, depth, rows, cols, channels]`. data_format: The data format of the input and output data. With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. )doc"); REGISTER_OP("MaxPool3DGradGrad") .Input("orig_input: T") .Input("orig_output: T") .Input("grad: T") .Output("output: T") .Attr("ksize: list(int) >= 5 ") .Attr("strides: list(int) >= 5") .Attr(GetPaddingAttrString()) .Attr(GetConvnet3dDataFormatAttrString()) .Attr("T: realnumbertype") .SetShapeFn([](InferenceContext* c) { TF_RETURN_IF_ERROR(shape_inference::Pool3DShape(c)); ShapeHandle unused; // Validate 'orig_input' is the same shape as 'grad' TF_RETURN_IF_ERROR(c->Merge(c->input(0), c->input(2), &unused)); // Validate 'orig_output' is same shape as 'output' TF_RETURN_IF_ERROR(c->Merge(c->input(1), c->output(0), &unused)); return Status::OK(); }) .Doc(R"doc( Computes second-order gradients of the maxpooling function. ksize: 1-D tensor of length 5. The size of the window for each dimension of the input tensor. Must have `ksize[0] = ksize[4] = 1`. strides: 1-D tensor of length 5. The stride of the sliding window for each dimension of `input`. Must have `strides[0] = strides[4] = 1`. padding: The type of padding algorithm to use. orig_input: The original input tensor. orig_output: The original output tensor. grad: Output backprop of shape `[batch, depth, rows, cols, channels]`. output: Gradients of gradients w.r.t. the input to `max_pool`. data_format: The data format of the input and output data. With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. )doc"); // -------------------------------------------------------------------------- REGISTER_OP("L2Loss") .Input("t: T") .Output("output: T") .Attr("T: numbertype") .SetShapeFn(shape_inference::ScalarShape) .Doc(R"doc( L2 Loss. Computes half the L2 norm of a tensor without the `sqrt`: output = sum(t ** 2) / 2 t: Typically 2-D, but may have any dimensions. output: 0-D. )doc"); // -------------------------------------------------------------------------- REGISTER_OP("LRN") .Input("input: T") .Output("output: T") .Attr("depth_radius: int = 5") .Attr("bias: float = 1.0") .Attr("alpha: float = 1.0") .Attr("beta: float = 0.5") .Attr("T: {float, half} = DT_FLOAT") .SetShapeFn([](InferenceContext* c) { return UnchangedShapeWithRank(c, 4); }) .Doc(R"doc( Local Response Normalization. The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last dimension), and each vector is normalized independently. Within a given vector, each component is divided by the weighted, squared sum of inputs within `depth_radius`. 
In detail, sqr_sum[a, b, c, d] = sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) output = input / (bias + alpha * sqr_sum) ** beta For details, see [Krizhevsky et al., ImageNet classification with deep convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks). input: 4-D. depth_radius: 0-D. Half-width of the 1-D normalization window. bias: An offset (usually positive to avoid dividing by 0). alpha: A scale factor, usually positive. beta: An exponent. )doc"); REGISTER_OP("LRNGrad") .Input("input_grads: T") .Input("input_image: T") .Input("output_image: T") .Output("output: T") .Attr("depth_radius: int = 5") .Attr("bias: float = 1.0") .Attr("alpha: float = 1.0") .Attr("beta: float = 0.5") .Attr("T: {float, half} = DT_FLOAT") .SetShapeFn([](InferenceContext* c) { ShapeHandle s; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &s)); // input_grads TF_RETURN_IF_ERROR(c->Merge(s, c->input(1), &s)); // input_image TF_RETURN_IF_ERROR(c->Merge(s, c->input(2), &s)); // output_image c->set_output(0, s); return Status::OK(); }) .Doc(R"doc( Gradients for Local Response Normalization. input_grads: 4-D with shape `[batch, height, width, channels]`. input_image: 4-D with shape `[batch, height, width, channels]`. output_image: 4-D with shape `[batch, height, width, channels]`. depth_radius: A depth radius. bias: An offset (usually > 0 to avoid dividing by 0). alpha: A scale factor, usually positive. beta: An exponent. output: The gradients for LRN. )doc"); // -------------------------------------------------------------------------- REGISTER_OP("MaxPool") .Attr("T: realnumbertype = DT_FLOAT") .Attr("ksize: list(int) >= 4") .Attr("strides: list(int) >= 4") .Attr(GetPaddingAttrString()) .Attr(GetConvnetDataFormatAttrString()) .Input("input: T") .Output("output: T") .SetShapeFn(shape_inference::MaxPoolShape) .Doc(R"doc( Performs max pooling on the input. ksize: The size of the window for each dimension of the input tensor. strides: The stride of the sliding window for each dimension of the input tensor. padding: The type of padding algorithm to use. data_format: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of: [batch, in_height, in_width, in_channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, in_channels, in_height, in_width]. input: 4-D input to pool over. output: The max pooled output tensor. )doc"); REGISTER_OP("MaxPoolGrad") .Attr("ksize: list(int) >= 4") .Attr("strides: list(int) >= 4") .Attr(GetPaddingAttrString()) .Attr(GetConvnetDataFormatAttrString()) .Input("orig_input: T") .Input("orig_output: T") .Input("grad: T") .Output("output: T") .Attr("T: realnumbertype = DT_FLOAT") .SetShapeFn([](InferenceContext* c) { return UnchangedShapeWithRank(c, 4); }) .Doc(R"doc( Computes gradients of the maxpooling function. ksize: The size of the window for each dimension of the input tensor. strides: The stride of the sliding window for each dimension of the input tensor. padding: The type of padding algorithm to use. data_format: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of: [batch, in_height, in_width, in_channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, in_channels, in_height, in_width]. orig_input: The original input tensor. orig_output: The original output tensor. grad: 4-D. Gradients w.r.t. 
the output of `max_pool`. output: Gradients w.r.t. the input to `max_pool`. )doc"); REGISTER_OP("MaxPoolGradGrad") .Attr("ksize: list(int) >= 4") .Attr("strides: list(int) >= 4") .Attr(GetPaddingAttrString()) .Attr(GetConvnetDataFormatAttrString()) .Input("orig_input: T") .Input("orig_output: T") .Input("grad: T") .Output("output: T") .Attr("T: realnumbertype") .SetShapeFn([](InferenceContext* c) { TF_RETURN_IF_ERROR(shape_inference::MaxPoolShape(c)); ShapeHandle unused; // Validate 'orig_input' is the same shape as 'grad' TF_RETURN_IF_ERROR(c->Merge(c->input(0), c->input(2), &unused)); // Validate 'orig_output' is same shape as 'output' TF_RETURN_IF_ERROR(c->Merge(c->input(1), c->output(0), &unused)); return Status::OK(); }) .Doc(R"doc( Computes second-order gradients of the maxpooling function. ksize: The size of the window for each dimension of the input tensor. strides: The stride of the sliding window for each dimension of the input tensor. padding: The type of padding algorithm to use. data_format: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of: [batch, in_height, in_width, in_channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, in_channels, in_height, in_width]. orig_input: The original input tensor. orig_output: The original output tensor. grad: 4-D. Gradients of gradients w.r.t. the input of `max_pool`. output: Gradients of gradients w.r.t. the input to `max_pool`. )doc"); REGISTER_OP("MaxPoolWithArgmax") .Attr("ksize: list(int) >= 4") .Attr("strides: list(int) >= 4") .Attr("Targmax: {int32, int64} = DT_INT64") .Attr(GetPaddingAttrString()) .Input("input: T") .Output("output: T") .Output("argmax: Targmax") .Attr("T: realnumbertype") .SetShapeFn([](InferenceContext* c) { TF_RETURN_IF_ERROR(shape_inference::MaxPoolShape(c)); c->set_output(1, c->output(0)); return Status::OK(); }) .Doc(R"doc( Performs max pooling on the input and outputs both max values and indices. The indices in `argmax` are flattened, so that a maximum value at position `[b, y, x, c]` becomes flattened index `((b * height + y) * width + x) * channels + c`. ksize: The size of the window for each dimension of the input tensor. strides: The stride of the sliding window for each dimension of the input tensor. padding: The type of padding algorithm to use. input: 4-D with shape `[batch, height, width, channels]`. Input to pool over. output: The max pooled output tensor. argmax: 4-D. The flattened indices of the max values chosen for each output. )doc"); REGISTER_OP("MaxPoolGradWithArgmax") .Attr("ksize: list(int) >= 4") .Attr("strides: list(int) >= 4") .Attr(GetPaddingAttrString()) .Attr("Targmax: {int32, int64}") .Input("input: T") .Input("grad: T") .Input("argmax: Targmax") .Output("output: T") .Attr("T: realnumbertype") .SetShapeFn([](InferenceContext* c) { return UnchangedShapeWithRank(c, 4); }) .Doc(R"doc( Computes gradients of the maxpooling function. ksize: The size of the window for each dimension of the input tensor.<|fim▁hole|> input tensor. padding: The type of padding algorithm to use. input: The original input. grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the output of `max_pool`. argmax: The indices of the maximum values chosen for each output of `max_pool`. output: Gradients w.r.t. the input of `max_pool`. 
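The flattened-index convention documented for `MaxPoolWithArgmax` above can be
made concrete with a few lines (a sketch of the documented NHWC layout, not
code from this file):

#include <cstdint>

// Flattened NHWC argmax index, as documented for MaxPoolWithArgmax:
//   ((b * height + y) * width + x) * channels + c
int64_t FlattenArgmax(int64_t b, int64_t y, int64_t x, int64_t c,
                      int64_t height, int64_t width, int64_t channels) {
  return ((b * height + y) * width + x) * channels + c;
}
// e.g. b=0, y=2, x=1, c=3 with height=4, width=5, channels=8
// gives ((0*4 + 2)*5 + 1)*8 + 3 = 91.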
)doc"); REGISTER_OP("MaxPoolGradGradWithArgmax") .Attr("ksize: list(int) >= 4") .Attr("strides: list(int) >= 4") .Attr(GetPaddingAttrString()) .Attr("Targmax: {int32, int64}") .Input("input: T") .Input("grad: T") .Input("argmax: Targmax") .Output("output: T") .Attr("T: realnumbertype") .SetShapeFn([](InferenceContext* c) { TF_RETURN_IF_ERROR(shape_inference::MaxPoolShape(c)); ShapeHandle unused; // Validate 'orig_input' is the same shape as 'grad' TF_RETURN_IF_ERROR(c->Merge(c->input(0), c->input(1), &unused)); // Validate 'argmax' is same shape as 'output' TF_RETURN_IF_ERROR(c->Merge(c->input(2), c->output(0), &unused)); return Status::OK(); }) .Doc(R"doc( Computes second-order gradients of the maxpooling function. ksize: The size of the window for each dimension of the input tensor. strides: The stride of the sliding window for each dimension of the input tensor. padding: The type of padding algorithm to use. input: The original input. grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the input of `max_pool`. argmax: The indices of the maximum values chosen for each output of `max_pool`. output: Gradients of gradients w.r.t. the input of `max_pool`. )doc"); // -------------------------------------------------------------------------- REGISTER_OP("Dilation2D") .Input("input: T") .Input("filter: T") .Output("output: T") .Attr("T: realnumbertype") .Attr("strides: list(int) >= 4") .Attr("rates: list(int) >= 4") .Attr(GetPaddingAttrString()) .SetShapeFn([](InferenceContext* c) { ShapeHandle input_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input_shape)); ShapeHandle filter_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 3, &filter_shape)); std::vector<int32> strides; TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides)); if (strides.size() != 4) { return errors::InvalidArgument( "Dilation2D requires the stride attribute to contain 4 values, but " "got: ", strides.size()); } std::vector<int32> rates; TF_RETURN_IF_ERROR(c->GetAttr("rates", &rates)); if (rates.size() != 4) { return errors::InvalidArgument( "Dilation2D requires the rates attribute to contain 4 values, but " "got: ", rates.size()); } int32 stride_rows = strides[1]; int32 stride_cols = strides[2]; int32 rate_rows = rates[1]; int32 rate_cols = rates[2]; DimensionHandle batch_size_dim = c->Dim(input_shape, 0); DimensionHandle in_rows_dim = c->Dim(input_shape, 1); DimensionHandle in_cols_dim = c->Dim(input_shape, 2); DimensionHandle filter_rows_dim = c->Dim(filter_shape, 0); DimensionHandle filter_cols_dim = c->Dim(filter_shape, 1); DimensionHandle output_depth_dim = c->Dim(filter_shape, 2); if (!c->ValueKnown(in_rows_dim) || !c->ValueKnown(in_cols_dim) || !c->ValueKnown(filter_rows_dim) || !c->ValueKnown(filter_cols_dim)) { ShapeHandle output_shape = c->MakeShape({batch_size_dim, InferenceContext::kUnknownDim, InferenceContext::kUnknownDim, output_depth_dim}); c->set_output(0, output_shape); return Status::OK(); } DimensionHandle unused; TF_RETURN_IF_ERROR( c->Merge(c->Dim(input_shape, 3), output_depth_dim, &unused)); auto in_rows = c->Value(in_rows_dim); auto in_cols = c->Value(in_cols_dim); auto filter_rows = c->Value(filter_rows_dim); auto filter_cols = c->Value(filter_cols_dim); auto filter_rows_eff = filter_rows + (filter_rows - 1) * (rate_rows - 1); auto filter_cols_eff = filter_cols + (filter_cols - 1) * (rate_cols - 1); Padding padding; TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding)); int64 output_rows, output_cols; int64 padding_before, padding_after; 
TF_RETURN_IF_ERROR(GetWindowedOutputSizeVerbose( in_rows, filter_rows_eff, stride_rows, padding, &output_rows, &padding_before, &padding_after)); TF_RETURN_IF_ERROR(GetWindowedOutputSizeVerbose( in_cols, filter_cols_eff, stride_cols, padding, &output_cols, &padding_before, &padding_after)); ShapeHandle output_shape = c->MakeShape( {batch_size_dim, output_rows, output_cols, output_depth_dim}); c->set_output(0, output_shape); return Status::OK(); }) .Doc(R"doc( Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors. The `input` tensor has shape `[batch, in_height, in_width, depth]` and the `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each input channel is processed independently of the others with its own structuring function. The `output` tensor has shape `[batch, out_height, out_width, depth]`. The spatial dimensions of the output tensor depend on the `padding` algorithm. We currently only support the default "NHWC" `data_format`. In detail, the grayscale morphological 2-D dilation is the max-sum correlation (for consistency with `conv2d`, we use unmirrored filters): output[b, y, x, c] = max_{dy, dx} input[b, strides[1] * y + rates[1] * dy, strides[2] * x + rates[2] * dx, c] + filter[dy, dx, c] Max-pooling is a special case when the filter has size equal to the pooling kernel size and contains all zeros. Note on duality: The dilation of `input` by the `filter` is equal to the negation of the erosion of `-input` by the reflected `filter`. input: 4-D with shape `[batch, in_height, in_width, depth]`. filter: 3-D with shape `[filter_height, filter_width, depth]`. strides: The stride of the sliding window for each dimension of the input tensor. Must be: `[1, stride_height, stride_width, 1]`. rates: The input stride for atrous morphological dilation. Must be: `[1, rate_height, rate_width, 1]`. padding: The type of padding algorithm to use. output: 4-D with shape `[batch, out_height, out_width, depth]`. )doc"); REGISTER_OP("Dilation2DBackpropInput") .Input("input: T") .Input("filter: T") .Input("out_backprop: T") .Output("in_backprop: T") .Attr("T: realnumbertype") .Attr("strides: list(int) >= 4") .Attr("rates: list(int) >= 4") .Attr(GetPaddingAttrString()) .SetShapeFn(shape_inference::UnchangedShape) .Doc(R"doc( Computes the gradient of morphological 2-D dilation with respect to the input. input: 4-D with shape `[batch, in_height, in_width, depth]`. filter: 3-D with shape `[filter_height, filter_width, depth]`. out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`. in_backprop: 4-D with shape `[batch, in_height, in_width, depth]`. strides: 1-D of length 4. The stride of the sliding window for each dimension of the input tensor. Must be: `[1, stride_height, stride_width, 1]`. rates: 1-D of length 4. The input stride for atrous morphological dilation. Must be: `[1, rate_height, rate_width, 1]`. padding: The type of padding algorithm to use. )doc"); REGISTER_OP("Dilation2DBackpropFilter") .Input("input: T") .Input("filter: T") .Input("out_backprop: T") .Output("filter_backprop: T") .Attr("T: realnumbertype") .Attr("strides: list(int) >= 4") .Attr("rates: list(int) >= 4") .Attr(GetPaddingAttrString()) .SetShapeFn([](InferenceContext* c) { c->set_output(0, c->input(1)); return Status::OK(); }) .Doc(R"doc( Computes the gradient of morphological 2-D dilation with respect to the filter. input: 4-D with shape `[batch, in_height, in_width, depth]`. filter: 3-D with shape `[filter_height, filter_width, depth]`. 
out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`. filter_backprop: 3-D with shape `[filter_height, filter_width, depth]`. strides: 1-D of length 4. The stride of the sliding window for each dimension of the input tensor. Must be: `[1, stride_height, stride_width, 1]`. rates: 1-D of length 4. The input stride for atrous morphological dilation. Must be: `[1, rate_height, rate_width, 1]`. padding: The type of padding algorithm to use. )doc"); // -------------------------------------------------------------------------- REGISTER_OP("Relu") .Input("features: T") .Output("activations: T") .Attr("T: realnumbertype") .SetShapeFn(shape_inference::UnchangedShape) .Doc(R"doc( Computes rectified linear: `max(features, 0)`. )doc"); REGISTER_OP("ReluGrad") .Input("gradients: T") .Input("features: T") .Output("backprops: T") .Attr("T: realnumbertype") .SetShapeFn(shape_inference::MergeBothInputsShapeFn) .Doc(R"doc( Computes rectified linear gradients for a Relu operation. gradients: The backpropagated gradients to the corresponding Relu operation. features: The features passed as input to the corresponding Relu operation, OR the outputs of that operation (both work equivalently). backprops: `gradients * (features > 0)`. )doc"); REGISTER_OP("Relu6") .Input("features: T") .Output("activations: T") .Attr("T: realnumbertype") .SetShapeFn(shape_inference::UnchangedShape) .Doc(R"doc( Computes rectified linear 6: `min(max(features, 0), 6)`. )doc"); REGISTER_OP("Relu6Grad") .Input("gradients: T") .Input("features: T") .Output("backprops: T") .Attr("T: realnumbertype") .SetShapeFn(shape_inference::MergeBothInputsShapeFn) .Doc(R"doc( Computes rectified linear 6 gradients for a Relu6 operation. gradients: The backpropagated gradients to the corresponding Relu6 operation. features: The features passed as input to the corresponding Relu6 operation. backprops: The gradients: `gradients * (features > 0) * (features < 6)`. )doc"); REGISTER_OP("Elu") .Input("features: T") .Output("activations: T") .Attr("T: realnumbertype") .SetShapeFn(shape_inference::UnchangedShape) .Doc(R"doc( Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise. See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) ](http://arxiv.org/abs/1511.07289) )doc"); REGISTER_OP("EluGrad") .Input("gradients: T") .Input("outputs: T") .Output("backprops: T") .Attr("T: realnumbertype") .SetShapeFn(shape_inference::MergeBothInputsShapeFn) .Doc(R"doc( Computes gradients for the exponential linear (Elu) operation. gradients: The backpropagated gradients to the corresponding Elu operation. outputs: The outputs of the corresponding Elu operation. backprops: The gradients: `gradients * (outputs + 1)` if outputs < 0, `gradients` otherwise. )doc"); REGISTER_OP("Softplus") .Input("features: T") .Output("activations: T") .Attr("T: realnumbertype") .SetShapeFn(shape_inference::UnchangedShape) .Doc(R"doc( Computes softplus: `log(exp(features) + 1)`. )doc"); REGISTER_OP("SoftplusGrad") .Input("gradients: T") .Input("features: T") .Output("backprops: T") .Attr("T: realnumbertype") .SetShapeFn(shape_inference::MergeBothInputsShapeFn) .Doc(R"doc( Computes softplus gradients for a softplus operation. gradients: The backpropagated gradients to the corresponding softplus operation. features: The features passed as input to the corresponding softplus operation. backprops: The gradients: `gradients / (1 + exp(-features))`. 
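These gradient formulas can be checked numerically. A minimal self-contained
sketch for softplus, whose derivative is exactly the logistic sigmoid quoted
above:

#include <cmath>
#include <cstdio>

int main() {
  const double x = 0.7, eps = 1e-6;
  // Central difference of softplus(x) = log(1 + exp(x)).
  const double numeric =
      (std::log1p(std::exp(x + eps)) - std::log1p(std::exp(x - eps))) /
      (2 * eps);
  const double sigmoid = 1.0 / (1.0 + std::exp(-x));  // analytic gradient
  std::printf("numeric %.9f analytic %.9f\n", numeric, sigmoid);
  return 0;
}

The two values agree to roughly 1e-9, matching
`gradients / (1 + exp(-features))`.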
)doc"); REGISTER_OP("Softsign") .Input("features: T") .Output("activations: T") .Attr("T: realnumbertype") .SetShapeFn(shape_inference::UnchangedShape) .Doc(R"doc( Computes softsign: `features / (abs(features) + 1)`. )doc"); REGISTER_OP("SoftsignGrad") .Input("gradients: T") .Input("features: T") .Output("backprops: T") .Attr("T: realnumbertype") .SetShapeFn(shape_inference::MergeBothInputsShapeFn) .Doc(R"doc( Computes softsign gradients for a softsign operation. gradients: The backpropagated gradients to the corresponding softsign operation. features: The features passed as input to the corresponding softsign operation. backprops: The gradients: `gradients / (1 + abs(-features)) ** 2`. )doc"); // -------------------------------------------------------------------------- REGISTER_OP("Softmax") .Input("logits: T") .Output("softmax: T") .Attr("T: {half, float, double}") .SetShapeFn([](InferenceContext* c) { return shape_inference::UnchangedShapeWithRankAtLeast(c, 1); }) .Doc(R"doc( Computes softmax activations. For each batch `i` and class `j` we have softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j])) logits: 2-D with shape `[batch_size, num_classes]`. softmax: Same shape as `logits`. )doc"); // -------------------------------------------------------------------------- REGISTER_OP("LogSoftmax") .Input("logits: T") .Output("logsoftmax: T") .Attr("T: {half, float, double}") .SetShapeFn([](InferenceContext* c) { return shape_inference::UnchangedShapeWithRankAtLeast(c, 1); }) .Doc(R"doc( Computes log softmax activations. For each batch `i` and class `j` we have logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) logits: 2-D with shape `[batch_size, num_classes]`. logsoftmax: Same shape as `logits`. )doc"); // -------------------------------------------------------------------------- REGISTER_OP("SoftmaxCrossEntropyWithLogits") .Input("features: T") .Input("labels: T") .Output("loss: T") .Output("backprop: T") .Attr("T: {half, float, double}") .SetShapeFn([](InferenceContext* c) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &input)); TF_RETURN_IF_ERROR(c->Merge(input, c->input(1), &input)); DimensionHandle batch_size = c->Dim(input, 0); c->set_output(0, c->Vector(batch_size)); c->set_output(1, input); return Status::OK(); }) .Doc(R"doc( Computes softmax cross entropy cost and gradients to backpropagate. Inputs are the logits, not probabilities. features: batch_size x num_classes matrix labels: batch_size x num_classes matrix The caller must ensure that each batch of labels represents a valid probability distribution. loss: Per example loss (batch_size vector). backprop: backpropagated gradients (batch_size x num_classes matrix). )doc"); REGISTER_OP("SparseSoftmaxCrossEntropyWithLogits") .Input("features: T") .Input("labels: Tlabels") .Output("loss: T") .Output("backprop: T") .Attr("T: {half, float, double}") .Attr("Tlabels: {int32, int64} = DT_INT64") .SetShapeFn([](InferenceContext* c) { ShapeHandle features; ShapeHandle labels; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &features)); TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &labels)); DimensionHandle batch_size; TF_RETURN_IF_ERROR( c->Merge(c->Dim(features, 0), c->Dim(labels, 0), &batch_size)); TF_RETURN_IF_ERROR(c->ReplaceDim(features, 0, batch_size, &features)); c->set_output(0, c->Vector(batch_size)); c->set_output(1, features); return Status::OK(); }) .Doc(R"doc( Computes softmax cross entropy cost and gradients to backpropagate. 
Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept a matrix of label probabilities, but rather a single label per row of features. This label is considered to have probability 1.0 for the given row. Inputs are the logits, not probabilities. features: batch_size x num_classes matrix labels: batch_size vector with values in [0, num_classes). This is the label for the given minibatch entry. loss: Per example loss (batch_size vector). backprop: backpropagated gradients (batch_size x num_classes matrix). )doc"); // -------------------------------------------------------------------------- REGISTER_OP("InTopK") .Input("predictions: float") .Input("targets: T") .Output("precision: bool") .Attr("k: int") .Attr("T: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle predictions; ShapeHandle targets; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &predictions)); TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &targets)); DimensionHandle batch_size; TF_RETURN_IF_ERROR( c->Merge(c->Dim(predictions, 0), c->Dim(targets, 0), &batch_size)); c->set_output(0, c->Vector(batch_size)); return Status::OK(); }) .Doc(R"doc( Says whether the targets are in the top `K` predictions. This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the prediction for the target class is among the top `k` predictions among all predictions for example `i`. Note that the behavior of `InTopK` differs from the `TopK` op in its handling of ties; if multiple classes have the same prediction value and straddle the top-`k` boundary, all of those classes are considered to be in the top `k`. More formally, let \\(predictions_i\\) be the predictions for all classes for example `i`, \\(targets_i\\) be the target class for example `i`, \\(out_i\\) be the output for example `i`, $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ predictions: A `batch_size` x `classes` tensor. targets: A `batch_size` vector of class ids. k: Number of top elements to look at for computing precision. precision: Computed Precision at `k` as a `bool Tensor`. )doc"); namespace { Status TopKShapeFn(InferenceContext* c) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &input)); // Get the k value, either from input tensor or attribute. DimensionHandle k_dim; if (c->num_inputs() >= 2) { TF_RETURN_IF_ERROR(c->MakeDimForScalarInput(1, &k_dim)); } else { int32 k; TF_RETURN_IF_ERROR(c->GetAttr("k", &k)); if (k < 0) { return errors::InvalidArgument("Need k >= 0, got ", k); } k_dim = c->MakeDim(k); } DimensionHandle last_dim = c->Dim(input, -1); if (c->ValueKnown(last_dim) && c->ValueKnown(k_dim) && c->Value(last_dim) < c->Value(k_dim)) { return errors::InvalidArgument( "input must have last dimension >= k = ", c->Value(k_dim), " but is ", c->Value(last_dim)); } // Replace last_dim with k_dim. ShapeHandle s; TF_RETURN_IF_ERROR(c->Subshape(input, 0, -1, &s)); TF_RETURN_IF_ERROR(c->Concatenate(s, c->Vector(k_dim), &s)); c->set_output(0, s); c->set_output(1, s); return Status::OK(); } } // namespace REGISTER_OP("TopK") .Input("input: T") .Output("values: T") .Output("indices: int32") .Attr("k: int >= 0") .Attr("sorted: bool = true") .Attr("T: realnumbertype") .Deprecated(7, "Use TopKV2 instead") .SetShapeFn(TopKShapeFn) .Doc(R"doc( Finds values and indices of the `k` largest elements for the last dimension. If the input is a vector (rank-1), finds the `k` largest entries in the vector and outputs their values and indices as vectors. 
Thus `values[j]` is the `j`-th largest entry in `input`, and its index is `indices[j]`. For matrices (resp. higher rank input), computes the top `k` entries in each row (resp. vector along the last dimension). Thus, values.shape = indices.shape = input.shape[:-1] + [k] If two elements are equal, the lower-index element appears first. If `k` varies dynamically, use `TopKV2` below. input: 1-D or higher with last dimension at least `k`. k: Number of top elements to look for along the last dimension (along each row for matrices). sorted: If true the resulting `k` elements will be sorted by the values in descending order. values: The `k` largest elements along each last dimensional slice. indices: The indices of `values` within the last dimension of `input`. )doc"); // This is the same as `TopK`, but takes `k` as in input rather than an attr. REGISTER_OP("TopKV2") .Input("input: T") .Input("k: int32") .Output("values: T") .Output("indices: int32") .Attr("sorted: bool = true") .Attr("T: realnumbertype") .SetShapeFn(TopKShapeFn) .Doc(R"doc( Finds values and indices of the `k` largest elements for the last dimension. If the input is a vector (rank-1), finds the `k` largest entries in the vector and outputs their values and indices as vectors. Thus `values[j]` is the `j`-th largest entry in `input`, and its index is `indices[j]`. For matrices (resp. higher rank input), computes the top `k` entries in each row (resp. vector along the last dimension). Thus, values.shape = indices.shape = input.shape[:-1] + [k] If two elements are equal, the lower-index element appears first. input: 1-D or higher with last dimension at least `k`. k: 0-D. Number of top elements to look for along the last dimension (along each row for matrices). sorted: If true the resulting `k` elements will be sorted by the values in descending order. values: The `k` largest elements along each last dimensional slice. indices: The indices of `values` within the last dimension of `input`. )doc"); // -------------------------------------------------------------------------- REGISTER_OP("FractionalMaxPool") .Input("value: T") .Output("output: T") .Output("row_pooling_sequence: int64") .Output("col_pooling_sequence: int64") .Attr("pooling_ratio: list(float) >=4") .Attr("pseudo_random: bool = false") .Attr("overlapping: bool = false") .Attr("deterministic: bool = false") .Attr("seed: int = 0") .Attr("seed2: int = 0") .Attr("T: {float, double, int32, int64}") .SetShapeFn(FractionalPoolShapeFn) .Doc(R"doc( Performs fractional max pooling on the input. Fractional max pooling is slightly different than regular max pooling. In regular max pooling, you downsize an input set by taking the maximum value of smaller N x N subsections of the set (often 2x2), and try to reduce the set by a factor of N, where N is an integer. Fractional max pooling, as you might expect from the word "fractional", means that the overall reduction ratio N does not have to be an integer. The sizes of the pooling regions are generated randomly but are fairly uniform. For example, let's look at the height dimension, and the constraints on the list of rows that will be pool boundaries. First we define the following: 1. input_row_length : the number of rows from the input set 2. output_row_length : which will be smaller than the input 3. alpha = input_row_length / output_row_length : our reduction ratio 4. K = floor(alpha) 5. row_pooling_sequence : this is the result list of pool boundary rows Then, row_pooling_sequence should satisfy: 1. 
a[0] = 0 : the first value of the sequence is 0
2. a[end] = input_row_length : the last value of the sequence is the size
3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
4. length(row_pooling_sequence) = output_row_length+1

One valid sequence under these constraints is sketched below, after the
gradient op.

For more details on fractional max pooling, see this paper:
[Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)

value: 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: Pooling ratio for each dimension of `value`, currently only
  supports row and col dimension and should be >= 1.0. For example, a valid
  pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
  must be 1.0 because we don't allow pooling on batch and channels
  dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions
  respectively.
pseudo_random: When set to True, generates the pooling sequence in a
  pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin
  Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for
  difference between pseudorandom and random.
overlapping: When set to True, it means when pooling, the values at the boundary
  of adjacent pooling cells are used by both cells. For example:
  `index  0  1  2  3  4`
  `value  20 5  16 3  7`
  If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
  The result would be [20, 16] for fractional max pooling.
deterministic: When set to True, a fixed pooling region will be used when
  iterating over a FractionalMaxPool node in the computation graph. Mainly used
  in unit tests to make FractionalMaxPool deterministic.
seed: If either seed or seed2 are set to be non-zero, the random number
  generator is seeded by the given seed. Otherwise, it is seeded by a
  random seed.
seed2: A second seed to avoid seed collision.
output: output tensor after fractional max pooling.
row_pooling_sequence: row pooling sequence, needed to calculate gradient.
col_pooling_sequence: column pooling sequence, needed to calculate gradient.
)doc");

REGISTER_OP("FractionalMaxPoolGrad")
    .Input("orig_input: T")
    .Input("orig_output: T")
    .Input("out_backprop: T")
    .Input("row_pooling_sequence: int64")
    .Input("col_pooling_sequence: int64")
    .Output("output: T")
    .Attr("overlapping: bool = false")
    .Attr("T: {float, double, int32, int64}")
    .SetShapeFn([](InferenceContext* c) {
      return shape_inference::UnchangedShapeWithRank(c, 4);
    })
    .Doc(R"doc(
Computes gradient of the FractionalMaxPool function.

orig_input: Original input for `fractional_max_pool`
orig_output: Original output for `fractional_max_pool`
out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients
  w.r.t. the output of `fractional_max_pool`.
row_pooling_sequence: row pooling sequence, form pooling region with
  col_pooling_sequence.
col_pooling_sequence: column pooling sequence, form pooling region with
  row_pooling sequence.
overlapping: When set to True, it means when pooling, the values at the boundary
  of adjacent pooling cells are used by both cells. For example:
  `index  0  1  2  3  4`
  `value  20 5  16 3  7`
  If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
  The result would be [20, 16] for fractional max pooling.
output: 4-D. Gradients w.r.t. the input of `fractional_max_pool`.
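As referenced above, the four pooling-sequence constraints determine a sequence
once the positions of the K+1-sized gaps are chosen. A hedged sketch that
builds one valid (deterministic) sequence — the actual op draws the gap
pattern randomly or pseudorandomly, this only shows the constraints are
satisfiable:

#include <vector>

// One sequence satisfying the documented constraints: a[0] = 0,
// a[end] = in_len, every gap is K or K+1, and length = out_len + 1.
// Assumes 1 <= out_len <= in_len.
std::vector<int> PoolingSequence(int in_len, int out_len) {
  const int k = in_len / out_len;             // K = floor(alpha)
  const int big_gaps = in_len - k * out_len;  // number of K+1 gaps needed
  std::vector<int> seq(1, 0);
  for (int i = 0; i < out_len; ++i) {
    seq.push_back(seq.back() + k + (i < big_gaps ? 1 : 0));
  }
  return seq;  // e.g. in_len = 10, out_len = 4 -> {0, 3, 6, 8, 10}
}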
)doc"); // -------------------------------------------------------------------------- REGISTER_OP("FractionalAvgPool") .Input("value: T") .Output("output: T") .Output("row_pooling_sequence: int64") .Output("col_pooling_sequence: int64") .Attr("pooling_ratio: list(float) >=4") .Attr("pseudo_random: bool = false") .Attr("overlapping: bool = false") .Attr("deterministic: bool = false") .Attr("seed: int = 0") .Attr("seed2: int = 0") .Attr("T: {float, double, int32, int64}") .SetShapeFn(FractionalPoolShapeFn) .Doc(R"doc( Performs fractional average pooling on the input. Fractional average pooling is similar to Fractional max pooling in the pooling region generation step. The only difference is that after pooling regions are generated, a mean operation is performed instead of a max operation in each pooling region. value: 4-D with shape `[batch, height, width, channels]`. pooling_ratio: Pooling ratio for each dimension of `value`, currently only supports row and col dimension and should be >= 1.0. For example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions respectively. pseudo_random: When set to True, generates the pooling sequence in a pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for difference between pseudorandom and random. overlapping: When set to True, it means when pooling, the values at the boundary of adjacent pooling cells are used by both cells. For example: `index 0 1 2 3 4` `value 20 5 16 3 7` If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. The result would be [41/3, 26/3] for fractional avg pooling. deterministic: When set to True, a fixed pooling region will be used when iterating over a FractionalAvgPool node in the computation graph. Mainly used in unit test to make FractionalAvgPool deterministic. seed: If either seed or seed2 are set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. seed2: An second seed to avoid seed collision. output: output tensor after fractional avg pooling. row_pooling_sequence: row pooling sequence, needed to calculate gradient. col_pooling_sequence: column pooling sequence, needed to calculate gradient. )doc"); REGISTER_OP("FractionalAvgPoolGrad") .Input("orig_input_tensor_shape: int64") .Input("out_backprop: T") .Input("row_pooling_sequence: int64") .Input("col_pooling_sequence: int64") .Output("output: T") .Attr("overlapping: bool = false") .Attr("T: {float, double, int32, int64}") .SetShapeFn([](InferenceContext* c) { if (c->input_tensor(0) != nullptr) { ShapeHandle out; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &out)); c->set_output(0, out); } else { c->set_output(0, c->UnknownShapeOfRank(4)); } return Status::OK(); }) .Doc(R"doc( Computes gradient of the FractionalAvgPool function. Unlike FractionalMaxPoolGrad, we don't need to find arg_max for FractionalAvgPoolGrad, we just need to evenly back-propagate each element of out_backprop to those indices that form the same pooling cell. Therefore, we just need to know the shape of original input tensor, instead of the whole tensor. orig_input_tensor_shape: Original input tensor shape for `fractional_avg_pool` out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the output of `fractional_avg_pool`. 
row_pooling_sequence: row pooling sequence, form pooling region with col_pooling_sequence. col_pooling_sequence: column pooling sequence, form pooling region with row_pooling sequence. overlapping: When set to True, it means when pooling, the values at the boundary of adjacent pooling cells are used by both cells. For example: `index 0 1 2 3 4` `value 20 5 16 3 7` If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. The result would be [41/3, 26/3] for fractional avg pooling. output: 4-D. Gradients w.r.t. the input of `fractional_avg_pool`. )doc"); REGISTER_OP("QuantizedAvgPool") .Input("input: T") .Input("min_input: float") .Input("max_input: float") .Output("output: T") .Output("min_output: float") .Output("max_output: float") .Attr("T: quantizedtype") .Attr("ksize: list(int)") .Attr("strides: list(int)") .Attr(GetPaddingAttrString()) .SetShapeFn([](InferenceContext* c) { TF_RETURN_IF_ERROR(shape_inference::AvgPoolShape(c)); ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused)); c->set_output(1, c->Scalar()); c->set_output(2, c->Scalar()); return Status::OK(); }) .Doc(R"doc( Produces the average pool of the input tensor for quantized types. input: 4-D with shape `[batch, height, width, channels]`. ksize: The size of the window for each dimension of the input tensor. The length must be 4 to match the number of dimensions of the input. strides: The stride of the sliding window for each dimension of the input tensor. The length must be 4 to match the number of dimensions of the input. padding: The type of padding algorithm to use. min_input: The float value that the lowest quantized input value represents. max_input: The float value that the highest quantized input value represents. min_output: The float value that the lowest quantized output value represents. max_output: The float value that the highest quantized output value represents. )doc"); REGISTER_OP("QuantizedBiasAdd") .Input("input: T1") .Input("bias: T2") .Input("min_input: float") .Input("max_input: float") .Input("min_bias: float") .Input("max_bias: float") .Output("output: out_type") .Output("min_out: float") .Output("max_out: float") .Attr("T1: quantizedtype") .Attr("T2: quantizedtype") .Attr("out_type: quantizedtype") .SetShapeFn([](InferenceContext* c) { TF_RETURN_IF_ERROR(shape_inference::BiasAddShape(c)); ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 0, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(5), 0, &unused)); c->set_output(1, c->Scalar()); c->set_output(2, c->Scalar()); return Status::OK(); }) .Doc(R"doc( Adds Tensor 'bias' to Tensor 'input' for Quantized types. Broadcasts the values of bias on dimensions 0..N-2 of 'input'. bias: A 1D bias Tensor with size matching the last dimension of 'input'. min_input: The float value that the lowest quantized input value represents. max_input: The float value that the highest quantized input value represents. min_bias: The float value that the lowest quantized bias value represents. max_bias: The float value that the highest quantized bias value represents. min_out: The float value that the lowest quantized output value represents. max_out: The float value that the highest quantized output value represents. 
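The min/max scalars threaded through these quantized ops define an affine
reading of the stored integers. A simplified sketch for 8-bit data — the real
kernels refine this mapping, e.g. for qint32 accumulators:

#include <cstdint>

// Interpret a quint8 code q given the float range [min_range, max_range]:
// code 0 represents min_range and code 255 represents max_range.
float Dequantize(uint8_t q, float min_range, float max_range) {
  return min_range + (static_cast<float>(q) / 255.0f) * (max_range - min_range);
}
// e.g. Dequantize(128, -1.0f, 1.0f) == -1.0f + (128.0f / 255.0f) * 2.0f
//      ~= 0.00392f, just above the midpoint of the range.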
)doc"); REGISTER_OP("QuantizedConv2D") .Input("input: Tinput") .Input("filter: Tfilter") .Input("min_input: float") .Input("max_input: float") .Input("min_filter: float") .Input("max_filter: float") .Output("output: out_type") .Output("min_output: float") .Output("max_output: float") .Attr("Tinput: quantizedtype") .Attr("Tfilter: quantizedtype") .Attr("out_type: quantizedtype = DT_QINT32") .Attr("strides: list(int)") .Attr(GetPaddingAttrString()) .SetShapeFn([](InferenceContext* c) { TF_RETURN_IF_ERROR(shape_inference::Conv2DShape(c)); ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 0, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(5), 0, &unused)); c->set_output(1, c->Scalar()); c->set_output(2, c->Scalar()); return Status::OK(); }) .Doc(R"doc( Computes a 2D convolution given quantized 4D input and filter tensors. The inputs are quantized tensors where the lowest value represents the real number of the associated minimum, and the highest represents the maximum. This means that you can only interpret the quantized output in the same way, by taking the returned minimum and maximum values into account. filter: filter's input_depth dimension must match input's depth dimensions. strides: The stride of the sliding window for each dimension of the input tensor. padding: The type of padding algorithm to use. min_input: The float value that the lowest quantized input value represents. max_input: The float value that the highest quantized input value represents. min_filter: The float value that the lowest quantized filter value represents. max_filter: The float value that the highest quantized filter value represents. min_output: The float value that the lowest quantized output value represents. max_output: The float value that the highest quantized output value represents. )doc"); REGISTER_OP("QuantizedMaxPool") .Input("input: T") .Input("min_input: float") .Input("max_input: float") .Output("output: T") .Output("min_output: float") .Output("max_output: float") .Attr("T: quantizedtype") .Attr("ksize: list(int)") .Attr("strides: list(int)") .Attr(GetPaddingAttrString()) .SetShapeFn([](InferenceContext* c) { TF_RETURN_IF_ERROR(shape_inference::MaxPoolShape(c)); ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused)); c->set_output(1, c->Scalar()); c->set_output(2, c->Scalar()); return Status::OK(); }) .Doc(R"doc( Produces the max pool of the input tensor for quantized types. input: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over. ksize: The size of the window for each dimension of the input tensor. The length must be 4 to match the number of dimensions of the input. strides: The stride of the sliding window for each dimension of the input tensor. The length must be 4 to match the number of dimensions of the input. padding: The type of padding algorithm to use. min_input: The float value that the lowest quantized input value represents. max_input: The float value that the highest quantized input value represents. min_output: The float value that the lowest quantized output value represents. max_output: The float value that the highest quantized output value represents. 
)doc"); REGISTER_OP("QuantizedRelu") .Input("features: Tinput") .Input("min_features: float") .Input("max_features: float") .Output("activations: out_type") .Output("min_activations: float") .Output("max_activations: float") .Attr("Tinput: quantizedtype") .Attr("out_type: quantizedtype = DT_QUINT8") .SetShapeFn([](InferenceContext* c) { TF_RETURN_IF_ERROR(shape_inference::UnchangedShape(c)); ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused)); c->set_output(1, c->Scalar()); c->set_output(2, c->Scalar()); return Status::OK(); }) .Doc(R"doc( Computes Quantized Rectified Linear: `max(features, 0)` activations: Has the same output shape as "features". min_features: The float value that the lowest quantized value represents. max_features: The float value that the highest quantized value represents. min_activations: The float value that the lowest quantized value represents. max_activations: The float value that the highest quantized value represents. )doc"); REGISTER_OP("QuantizedRelu6") .Input("features: Tinput") .Input("min_features: float") .Input("max_features: float") .Output("activations: out_type") .Output("min_activations: float") .Output("max_activations: float") .Attr("Tinput: quantizedtype") .Attr("out_type: quantizedtype = DT_QUINT8") .SetShapeFn([](InferenceContext* c) { TF_RETURN_IF_ERROR(shape_inference::UnchangedShape(c)); ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused)); c->set_output(1, c->Scalar()); c->set_output(2, c->Scalar()); return Status::OK(); }) .Doc(R"doc( Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` activations: Has the same output shape as "features". min_features: The float value that the lowest quantized value represents. max_features: The float value that the highest quantized value represents. min_activations: The float value that the lowest quantized value represents. max_activations: The float value that the highest quantized value represents. )doc"); REGISTER_OP("QuantizedReluX") .Input("features: Tinput") .Input("max_value: float") .Input("min_features: float") .Input("max_features: float") .Output("activations: out_type") .Output("min_activations: float") .Output("max_activations: float") .Attr("Tinput: quantizedtype") .Attr("out_type: quantizedtype = DT_QUINT8") .SetShapeFn([](InferenceContext* c) { TF_RETURN_IF_ERROR(shape_inference::UnchangedShape(c)); ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused)); c->set_output(1, c->Scalar()); c->set_output(2, c->Scalar()); return Status::OK(); }) .Doc(R"doc( Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` activations: Has the same output shape as "features". min_features: The float value that the lowest quantized value represents. max_features: The float value that the highest quantized value represents. min_activations: The float value that the lowest quantized value represents. max_activations: The float value that the highest quantized value represents. 
)doc"); REGISTER_OP("QuantizedBatchNormWithGlobalNormalization") .Input("t: Tinput") .Input("t_min: float") .Input("t_max: float") .Input("m: Tinput") .Input("m_min: float") .Input("m_max: float") .Input("v: Tinput") .Input("v_min: float") .Input("v_max: float") .Input("beta: Tinput") .Input("beta_min: float") .Input("beta_max: float") .Input("gamma: Tinput") .Input("gamma_min: float") .Input("gamma_max: float") .Output("result: out_type") .Output("result_min: float") .Output("result_max: float") .Attr("Tinput: quantizedtype") .Attr("out_type: quantizedtype") .Attr("variance_epsilon: float") .Attr("scale_after_normalization: bool") .SetShapeFn([](InferenceContext* c) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input)); DimensionHandle last_dim = c->Dim(input, 3); for (int i = 1; i < 5; ++i) { // covers m, v, beta, gamma ShapeHandle vec; TF_RETURN_IF_ERROR(c->WithRank(c->input(i * 3), 1, &vec)); TF_RETURN_IF_ERROR(c->Merge(last_dim, c->Dim(vec, 0), &last_dim)); } ShapeHandle out; TF_RETURN_IF_ERROR(c->ReplaceDim(input, 3, last_dim, &out)); c->set_output(0, out); c->set_output(1, c->Scalar()); c->set_output(2, c->Scalar()); return Status::OK(); }) .Doc(R"doc( Quantized Batch normalization. This op is deprecated and will be removed in the future. Prefer `tf.nn.batch_normalization`. t: A 4D input Tensor. t_min: The value represented by the lowest quantized input. t_max: The value represented by the highest quantized input. m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof. m_min: The value represented by the lowest quantized mean. m_max: The value represented by the highest quantized mean. v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof. v_min: The value represented by the lowest quantized variance. v_max: The value represented by the highest quantized variance. beta: A 1D beta Tensor with size matching the last dimension of t. An offset to be added to the normalized tensor. beta_min: The value represented by the lowest quantized offset. beta_max: The value represented by the highest quantized offset. gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this tensor will be multiplied with the normalized tensor. gamma_min: The value represented by the lowest quantized gamma. gamma_max: The value represented by the highest quantized gamma. variance_epsilon: A small float number to avoid dividing by 0. scale_after_normalization: A bool indicating whether the resulted tensor needs to be multiplied with gamma. 
)doc"); #ifdef INTEL_MKL REGISTER_OP("MklConv2D") .Input("input: T") .Input("mkl_input: uint8") .Input("filter: T") .Input("mkl_filter: uint8") .Output("output: T") .Output("mkl_output: uint8") .Attr("T: {half, float, double}") .Attr("strides: list(int)") .Attr("use_cudnn_on_gpu: bool = true") .Attr(GetPaddingAttrString()) .Attr(GetConvnetDataFormatAttrString()) .SetShapeFn(shape_inference::Conv2DShape) .Doc(R"doc( MKL version of Conv2D )doc"); REGISTER_OP("MklConv2DWithBias") .Input("input: T") .Input("mkl_input: uint8") .Input("filter: T") .Input("mkl_filter: uint8") .Input("bias: T") .Input("mkl_bias: uint8") .Output("output: T") .Output("mkl_output: uint8") .Attr("T: {half, float, double}") .Attr("strides: list(int)") .Attr("use_cudnn_on_gpu: bool = true") .Attr(GetPaddingAttrString()) .Attr(GetConvnetDataFormatAttrString()); REGISTER_OP("MklToTf") .Input("input: T") .Input("mkl_input: uint8") .Output("output: T") .Attr("T: {half, float, double}") .Attr(GetConvnetDataFormatAttrString()); #endif // INTEL_MKL } // namespace tensorflow<|fim▁end|>
strides: The stride of the sliding window for each dimension of the
<|file_name|>random.py<|end_file_name|><|fim▁begin|>""" Statistics for astronomy """ import numpy as np from scipy.stats.distributions import rv_continuous <|fim▁hole|>def bivariate_normal(mu=[0, 0], sigma_1=1, sigma_2=1, alpha=0, size=None, return_cov=False): """Sample points from a 2D normal distribution Parameters ---------- mu : array-like (length 2) The mean of the distribution sigma_1 : float The unrotated x-axis width sigma_2 : float The unrotated y-axis width alpha : float The rotation counter-clockwise about the origin size : tuple of ints, optional Given a shape of, for example, ``(m,n,k)``, ``m*n*k`` samples are generated, and packed in an `m`-by-`n`-by-`k` arrangement. Because each sample is `N`-dimensional, the output shape is ``(m,n,k,N)``. If no shape is specified, a single (`N`-D) sample is returned. return_cov : boolean, optional If True, return the computed covariance matrix. Returns ------- out : ndarray The drawn samples, of shape *size*, if that was provided. If not, the shape is ``(N,)``. In other words, each entry ``out[i,j,...,:]`` is an N-dimensional value drawn from the distribution. cov : ndarray The 2x2 covariance matrix. Returned only if return_cov == True. Notes ----- This function works by computing a covariance matrix from the inputs, and calling ``np.random.multivariate_normal()``. If the covariance matrix is available, this function can be called directly. """ # compute covariance matrix sigma_xx = ((sigma_1 * np.cos(alpha)) ** 2 + (sigma_2 * np.sin(alpha)) ** 2) sigma_yy = ((sigma_1 * np.sin(alpha)) ** 2 + (sigma_2 * np.cos(alpha)) ** 2) sigma_xy = (sigma_1 ** 2 - sigma_2 ** 2) * np.sin(alpha) * np.cos(alpha) cov = np.array([[sigma_xx, sigma_xy], [sigma_xy, sigma_yy]]) # draw points from the distribution x = np.random.multivariate_normal(mu, cov, size) if return_cov: return x, cov else: return x #---------------------------------------------------------------------- # Define some new distributions based on rv_continuous class trunc_exp_gen(rv_continuous): """A truncated positive exponential continuous random variable. The probability distribution is:: p(x) ~ exp(k * x) between a and b = 0 otherwise The arguments are (a, b, k) %(before_notes)s %(example)s """ def _argcheck(self, a, b, k): self._const = k / (np.exp(k * b) - np.exp(k * a)) return (a != b) and not np.isinf(k) def _pdf(self, x, a, b, k): pdf = self._const * np.exp(k * x) pdf[(x < a) | (x > b)] = 0 return pdf def _rvs(self, a, b, k): y = np.random.random(self._size) return (1. / k) * np.log(1 + y * k / self._const) trunc_exp = trunc_exp_gen(name="trunc_exp", shapes='a, b, k') class linear_gen(rv_continuous): """A truncated positive exponential continuous random variable. The probability distribution is:: p(x) ~ c * x + d between a and b = 0 otherwise The arguments are (a, b, c). d is set by the normalization %(before_notes)s %(example)s """ def _argcheck(self, a, b, c): return (a != b) and not np.isinf(c) def _pdf(self, x, a, b, c): d = 1. / (b - a) - 0.5 * c * (b + a) pdf = c * x + d pdf[(x < a) | (x > b)] = 0 return pdf def _rvs(self, a, b, c): mu = 0.5 * (a + b) W = (b - a) x0 = 1. / c / W - mu r = np.random.random(self._size) return -x0 + np.sqrt(2. * r / c + a * a + 2. * a * x0 + x0 * x0) linear = linear_gen(name="linear", shapes='a, b, c')<|fim▁end|>
<|file_name|>countryMentionNYT.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- """ Estimate the international "visibility" of countries by retrieving the average number of articles the New York Times returns in its search query. For each year and each country, a query is send to the NYT api and the number of returned hits (i.e. articles) is taken as estimate for the international "visibility". To ensure optimal coverage, for each country synonyms have been defined (see CountryCodeMapper.py) and the average of the count is taken. ---- Copyright (C) 2015 Niklas Berliner This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by<|fim▁hole|>This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. """ import sys import os import pickle from time import sleep from countryCodeMapper import CountryCodeMapper from utils import Country, CountryContainer from nytimesarticles import articleAPI, DeveloperOverRate api = articleAPI('/* Your API access key here */') # Use tmp folder to keep intermediate results. Final output will be placed there as well tmpFolder = "../data/newspaper/raw/" ## Read the temporary folder content done = [ int(fname[8:-2]) for fname in os.listdir(tmpFolder) if fname != "country_aggregate.p" ] # Initialise some variables C = CountryCodeMapper() countries = C.countryNames() container = CountryContainer() # Run the scrape for the years 1980 to 2014 (including) dates = range(1980,2015) for date in dates: if date in done: print("Loading year", date) a = pickle.load(open(tmpFolder + "country_%s.p" %str(date), "rb")) else: print("Processing year", date) a = Country(date) for idx, country in enumerate(countries): success = False i = 0 while i<=3 and not success: try: query = api.search( q = country, begin_date = str(date) + '0101', end_date = str(date) + '1231' ) sleep(.1) assert( query["status"] == "OK" ) count = query["response"]["meta"]["hits"] a(country, count) i += 1 success = True except DeveloperOverRate: print("You most probably exceeded you api key limit\n") sys.exit() except: success = False i += 1 sleep(1) # allow the server some quiet time if not success: print("Error in %s, %s" %(date, country)) # Store the year as pickle in case something breaks during the run pickle.dump(a, open(tmpFolder + "country_%s.p" %str(date), "wb")) # Save the original data as csv file a.save(tmpFolder + "country_%s.csv" %str(date)) # Add the country to the container container(a) pickle.dump(container, open(tmpFolder + "country_aggregate.p", "wb")) # Save everything to a csv file. The columns will be the countries, the rows # will be the years. One column contains the years (sanity check to ensure # that the row order is not messed up). container.save(tmpFolder + "/NYT_scrape.csv")<|fim▁end|>
the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
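The retry loop in the scraper above can be factored into a small helper. A
hedged sketch that mirrors the script's own api.search call and response
fields — hit_count and its parameters are illustrative names, not part of the
original script:

from time import sleep

def hit_count(api, term, year, retries=3, backoff=1.0):
    """Number of NYT articles matching `term` in `year`, or None if every
    attempt fails. Mirrors the retry loop above, with linear backoff."""
    for attempt in range(retries):
        try:
            query = api.search(q=term,
                               begin_date='%d0101' % year,
                               end_date='%d1231' % year)
            if query.get("status") == "OK":
                return query["response"]["meta"]["hits"]
        except Exception:  # the script retries on any failure; kept here
            pass
        sleep(backoff * (attempt + 1))  # give the server some quiet time
    return None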