filename: string, length 4 to 198
content: string, length 25 to 939k
environment: list
variablearg: list
constarg: list
variableargjson: string, 1 distinct value
constargjson: string, length 2 to 3.9k
lang: string, 3 distinct values
constargcount: float64, range 0 to 129
variableargcount: float64, range 0 to 0
sentence: string, 1 distinct value
tools/mcsema_disass/ida7/exception.py
#!/usr/bin/env python # Copyright (c) 2020 Trail of Bits, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import idautils import idaapi import idc import sys import os import argparse import struct import traceback import collections import itertools import pprint import ida_bytes from collections import namedtuple # Bring in utility libraries. from util import * frame_entry = namedtuple('frame_entry', ['cs_start', 'cs_end', 'cs_lp', 'cs_action', 'action_list']) _FUNC_UNWIND_FRAME_EAS = set() _EXCEPTION_BLOCKS_EAS = dict() DW_EH_PE_ptr = 0x00 DW_EH_PE_uleb128 = 0x01 DW_EH_PE_udata2 = 0x02 DW_EH_PE_udata4 = 0x03 DW_EH_PE_udata8 = 0x04 DW_EH_PE_signed = 0x08 DW_EH_PE_sleb128 = 0x09 DW_EH_PE_sdata2 = 0x0A DW_EH_PE_sdata4 = 0x0B DW_EH_PE_sdata8 = 0x0C DW_EH_PE_absptr = 0x00 DW_EH_PE_pcrel = 0x10 DW_EH_PE_textrel = 0x20 DW_EH_PE_datarel = 0x30 DW_EH_PE_funcrel = 0x40 DW_EH_PE_aligned = 0x50 DW_EH_PE_indirect = 0x80 DW_EH_PE_omit = 0xFF class EHBlocks(object): def __init__(self, start_ea, end_ea): self.start_ea = start_ea self.end_ea = end_ea def make_array(ea, size): if ea != idc.BADADDR and ea != 0: flags = idc.get_full_flags(ea) if not idc.isByte(flags) or idc.get_item_size(ea) != 1: idc.del_items(ea, idc.DOUNK_SIMPLE, 1) ida_bytes.create_data(ea, ida_bytes.FF_BYTE, 1, ida_idaapi.BADADDR) idc.make_array(ea, size) def read_string(ea): s = idc.get_strlit_contents(ea, -1, idc.ASCSTR_C) if s: slen = len(s)+1 idc.del_items(ea, idc.DOUNK_SIMPLE, slen) idaapi.make_ascii_string(ea, slen, idc.ASCSTR_C) return s, ea + slen else: return s, ea def read_uleb128(ea): return read_leb128(ea, False) def read_sleb128(ea): return read_leb128(ea, True) def enc_size(enc): """ Read encoding size """ fmt = enc & 0x0F if fmt == DW_EH_PE_ptr: return get_address_size_in_bytes() elif fmt in [DW_EH_PE_sdata2, DW_EH_PE_udata2]: return 2 elif fmt in [DW_EH_PE_sdata4, DW_EH_PE_udata4]: return 4 elif fmt in [DW_EH_PE_sdata8, DW_EH_PE_udata8]: return 8 elif fmt != DW_EH_PE_omit: DEBUG("Encoding {0:x} is not of fixed size".format(enc)) return 0 def read_enc_value(ea, enc): """ Read encoded value """ if enc == DW_EH_PE_omit: DEBUG("Error in read_enc_val {0:x}".format(ea)) return idc.BADADDR, idc.BADADDR start = ea fmt, mod = enc&0x0F, enc&0x70 if fmt == DW_EH_PE_ptr: val = read_pointer(ea) ea += get_address_size_in_bytes() elif fmt in [DW_EH_PE_uleb128, DW_EH_PE_sleb128]: val, ea = read_leb128(ea, fmt == DW_EH_PE_sleb128) if ea - start > 1: make_array(start, ea - start) elif fmt in [DW_EH_PE_sdata2, DW_EH_PE_udata2]: val = read_word(ea) ea += 2 if fmt == DW_EH_PE_sdata2: val = sign_extend(val, 16) elif fmt in [DW_EH_PE_sdata4, DW_EH_PE_udata4]: val = read_dword(ea) ea += 4 if fmt == DW_EH_PE_sdata4: val = sign_extend(val, 32) elif fmt in [DW_EH_PE_sdata8, DW_EH_PE_udata8]: val = read_qword(ea) ea += 8 if f == DW_EH_PE_sdata8: val = sign_extend(val, 64) else: DEBUG("{0:x}: don't know how to handle {1:x}".format(start, enc)) 
return idc.BADADDR, idc.BADADDR if mod == DW_EH_PE_pcrel: if val != 0: val += start val &= (1<<(get_address_size_in_bits())) - 1 elif mod != DW_EH_PE_absptr: DEBUG("{0:x}: don't know how to handle {1:x}".format(start, enc)) return BADADDR, BADADDR if (enc & DW_EH_PE_indirect) and val != 0: if not idc.isLoaded(val): DEBUG("{0:x}: dereference invalid pointer {1:x}".format(start, val)) return idc.BADADDR, idc.BADADDR val = read_pointer(val) return val, ea def _create_frame_entry(start = None, end = None, lp = None, action = None, act_list = None): return frame_entry(start, end, lp, action, act_list) def format_lsda_actions(action_tbl, act_ea, type_addr, type_enc, act_id): """ Recover the exception actions and type info """ action_list = [] if action_tbl == idc.BADADDR: return DEBUG("start action ea : {:x}".format(act_ea)) while True: ar_filter,ea2 = read_enc_value(act_ea, DW_EH_PE_sleb128) ar_disp, ea3 = read_enc_value(ea2, DW_EH_PE_sleb128) if ar_filter > 0: type_slot = type_addr - ar_filter * enc_size(type_enc) type_ea, eatmp = read_enc_value(type_slot, type_enc) DEBUG("catch type typeinfo = {:x} {} {}".format(type_ea, get_symbol_name(type_ea), ar_filter)) action_list.append((ar_disp, ar_filter, type_ea)) #DEBUG(" format_lsda_actions ea {:x}: ar_disp[{}]: {} ({:x})".format(act_ea, act_id, ar_disp, ar_filter)) if ar_disp == 0: break act_ea = ea2 + ar_disp return action_list def create_block_entries(start_ea, heads): index = 0 block_set = set() for entry in heads: if entry == 0: continue if index < len(heads) - 1: ea = heads[index] while heads[index] <= ea < heads[index + 1]: inst, _ = decode_instruction(ea) if not inst: break block = EHBlocks(ea, ea + inst.size) ea = ea + inst.size block_set.add(block) index = index + 1 _EXCEPTION_BLOCKS_EAS[start_ea] = block_set def format_lsda(lsda_ptr, start_ea, range = None, sjlj = False): """ Recover the language specific data area """ lsda_entries = list() heads = set() lpstart_enc, ea = read_byte(lsda_ptr), lsda_ptr + 1 if lpstart_enc != DW_EH_PE_omit: lpstart, next_ea = read_enc_value(ea, lpstart_enc) ea = next_ea else: lpstart = start_ea # get the type encoding and type address associated with the exception handling blocks type_enc, ea = read_byte(ea), ea + 1 type_addr = idc.BADADDR if type_enc != DW_EH_PE_omit: type_off, next_ea = read_enc_value(ea, DW_EH_PE_uleb128) type_addr = next_ea + type_off ea = next_ea cs_enc, next_ea = read_byte(ea), ea + 1 ea = next_ea cs_len, next_ea = read_enc_value(ea, DW_EH_PE_uleb128) action_tbl = next_ea + cs_len ea = next_ea i = 0 actions = [] action_list = [] while ea < action_tbl: if sjlj: cs_lp, next_ea = read_enc_val(ea, DW_EH_PE_uleb128, True) act_ea = next_ea cs_action, next_ea = read_enc_value(next_ea, DW_EH_PE_uleb128) DEBUG("ea {:x}: cs_lp[{}] = {}".format(ea, i, cs_lp)) ea = next_ea else: cs_start, next_ea = read_enc_value(ea, cs_enc) cs_start += lpstart DEBUG("ea {:x}: cs_start[{}] = {:x} ({})".format(ea, i, cs_start, get_symbol_name(start_ea))) ea = next_ea heads.add(cs_start) cs_len, next_ea = read_enc_value(ea, cs_enc & 0x0F) cs_end = cs_start + cs_len DEBUG("ea {:x}: cs_len[{:x}] = {} (end = {:x})".format(ea, i, cs_len, cs_start + cs_len)) ea = next_ea heads.add(cs_end) cs_lp, next_ea = read_enc_value(ea, cs_enc) cs_lp = cs_lp + lpstart if cs_lp != 0 else cs_lp act_ea = next_ea DEBUG("ea {:x}: cs_lp[{}] = {:x}".format(ea, i, cs_lp)) ea = next_ea if cs_lp != 0: heads.add(cs_lp) cs_action, next_ea = read_enc_value(ea, DW_EH_PE_uleb128) ea = next_ea if cs_action != 0: actions.append(cs_action) 
DEBUG_PUSH() DEBUG("Landing pad for {0:x}..{1:x}".format(cs_start, cs_start + cs_len)) DEBUG_POP() if cs_action != 0: action_offset = action_tbl + cs_action - 1 action_list = format_lsda_actions(action_tbl, action_offset, type_addr, type_enc, cs_action) lsda_entries.append(_create_frame_entry(cs_start, cs_start + cs_len, cs_lp, cs_action, action_list)) DEBUG("ea {:x}: cs_action[{}] = {}".format(act_ea, i, cs_action)) i += 1 create_block_entries(start_ea, sorted(heads)) FUNC_LSDA_ENTRIES[start_ea] = lsda_entries class AugmentationData: def __init__(self): self.aug_present = False self.lsda_encoding = DW_EH_PE_omit self.personality_ptr = None self.fde_encoding = DW_EH_PE_absptr class EHRecord: def __init__(self): self.type = "" self.version = None self.data = None self.aug_string = "" self.code_align = None self.data_align = None self.retn_reg = None _AUGM_PARAM = dict() def format_entries(ea): """ Check the types of entries CIE/FDE recover them """ start_ea = ea size, ea = read_dword(ea), ea + 4 if size == 0: return idc.BADADDR end_ea = ea + size entry = EHRecord() cie_id, ea = read_dword(ea), ea + 4 is_cie = cie_id == 0 entry.type = ["FDE", "CIE"][is_cie] #DEBUG("ea {0:x}: type {1} size {2}".format(start_ea, entry.type, size)) if is_cie: entry.version, ea = read_byte(ea), ea + 1 entry.aug_string, ea = read_string(ea) if entry.aug_string is None: return end_ea entry.code_align, ea = read_uleb128(ea) entry.data_align, ea = read_uleb128(ea) if entry.version == 1: entry.retn_reg, ea = read_byte(ea), ea + 1 else: entry.retn_reg, ea = read_uleb128(ea) aug_data = AugmentationData() if entry.aug_string[0:1]=='z': aug_len, ea = read_uleb128(ea) aug_data.aug_present = True for s in entry.aug_string[1:]: if s == 'L': aug_data.lsda_encoding, ea = read_byte(ea), ea + 1 elif s == 'P': enc, ea = read_byte(ea), ea + 1 aug_data.personality_ptr, ea2 = read_enc_value(ea, enc) #DEBUG("ea {0:x}: personality function {1:x}".format(ea, aug_data.personality_ptr)) ea = ea2 elif s == 'R': aug_data.fde_encoding, ea = read_byte(ea), ea + 1 else: #DEBUG("ea {0:x}: unhandled string char {1}".format(ea, s)) return idc.BADADDR _AUGM_PARAM[start_ea] = aug_data else: base_ea = ea - 4 cie_ea = base_ea - cie_id if cie_ea in _AUGM_PARAM: aug_data = _AUGM_PARAM[cie_ea] else: return idc.BADADDR pc_begin, ea2 = read_enc_value(ea, aug_data.fde_encoding) #DEBUG("ea {0:x}: CIE pointer".format(base_ea)) #DEBUG("ea {0:x}: PC begin={1:x}".format(ea, pc_begin)) ea = ea2 range_len, ea2 = read_enc_value(ea, aug_data.fde_encoding & 0x0F) #DEBUG("ea {:x}: PC range = {:x} (PC end={:x})".format(ea, range_len, range_len + pc_begin)) if range_len: _FUNC_UNWIND_FRAME_EAS.add((pc_begin, range_len)) ea = ea2 if aug_data.aug_present: aug_len, ea = read_uleb128(ea) if aug_data.lsda_encoding != DW_EH_PE_omit: lsda_ptr, ea2 = read_enc_value(ea, aug_data.lsda_encoding) #DEBUG("ea {0:x}: LSDA pointer {1:x}".format(ea, lsda_ptr)) DEBUG_PUSH() if lsda_ptr: format_lsda(lsda_ptr, pc_begin, range_len, False) DEBUG_POP() return end_ea def recover_frame_entries(seg_ea): if seg_ea == idc.BADADDR: return DEBUG("Recover entries from section : {}".format(idc.get_segm_name(seg_ea))) ea = idc.get_segm_start(seg_ea) end_ea = idc.get_segm_end(seg_ea) while ea != idc.BADADDR and ea < end_ea: ea = format_entries(ea) def recover_exception_table(): """ Recover the CIE and FDE entries from the segment .eh_frame """ seg_eas = [ea for ea in idautils.Segments() if not is_invalid_ea(ea)] for seg_ea in seg_eas: seg_name = idc.get_segm_name(seg_ea) if seg_name in [".eh_frame", 
"__eh_frame"]: recover_frame_entries(seg_ea) break def recover_exception_entries(F, func_ea): has_unwind_frame = func_ea in FUNC_LSDA_ENTRIES.keys() if has_unwind_frame: lsda_entries = FUNC_LSDA_ENTRIES[func_ea] for entry in lsda_entries: EH = F.eh_frame.add() EH.func_ea = func_ea EH.start_ea = entry.cs_start EH.end_ea = entry.cs_end EH.lp_ea = entry.cs_lp EH.action = entry.cs_action != 0 for ar_disp, ar_filter, type_ea in entry.action_list: AC = EH.ttype.add() AC.ea = type_ea AC.name = get_symbol_name(type_ea) AC.size = ar_filter AC.is_weak = False AC.is_thread_local = False def fix_function_bounds(min_ea, max_ea): for func_ea, range in _FUNC_UNWIND_FRAME_EAS: if func_ea == min_ea: return func_ea, func_ea + range return min_ea, max_ea def get_exception_landingpad(F, insn_ea): has_lp = F.ea in FUNC_LSDA_ENTRIES.keys() if has_lp: lsda_entries = FUNC_LSDA_ENTRIES[F.ea] for entry in lsda_entries: if insn_ea >= entry.cs_start and insn_ea < entry.cs_end: return entry.cs_lp return 0 def get_exception_chunks(sub_ea): has_block = sub_ea in _EXCEPTION_BLOCKS_EAS.keys() if has_block: block_set = _EXCEPTION_BLOCKS_EAS[sub_ea] for block in block_set: yield block.start_ea, block.end_ea
[]
[]
[]
[]
[]
python
null
null
null
aidbox_python_sdk/main.py
import asyncio import logging import sys import errno import os from pathlib import Path from aiohttp import web, ClientSession, BasicAuth, client_exceptions from .handlers import routes logger = logging.getLogger('aidbox_sdk') THIS_DIR = Path(__file__).parent BASE_DIR = THIS_DIR.parent def setup_routes(app): app.add_routes(routes) async def init_aidbox(app): try: json = { 'url': app['settings'].APP_URL, 'app_id': app['settings'].APP_ID, 'secret': app['settings'].APP_SECRET, } async with app['init_http_client'].post( '{}/App/$init'.format(app['settings'].APP_INIT_URL), json=json ) as resp: if 200 <= resp.status < 300: logger.info('Initializing Aidbox app...') else: logger.error( 'Aidbox app initialized failed. ' 'Response from Aidbox: {0} {1}'.format( resp.status, await resp.text() ) ) sys.exit(errno.EINTR) except ( client_exceptions.ServerDisconnectedError, client_exceptions.ClientConnectionError ): logger.error( 'Aidbox address is unreachable {}'.format( app['settings'].APP_INIT_URL ) ) sys.exit(errno.EINTR) async def wait_and_init_aidbox(app): address = app['settings'].APP_URL logger.debug("Check availability of {}".format(address)) while 1: try: async with app['init_http_client'].get(address, timeout=5): pass break except ( asyncio.TimeoutError, client_exceptions.InvalidURL, client_exceptions.ClientConnectionError ): await asyncio.sleep(2) await init_aidbox(app) def fake_config(settings, client): return { 'type': 'config', 'box': { 'base-url': settings.APP_INIT_URL, }, 'client': client, } async def fast_start(app): if not os.environ.get('APP_FAST_START_MODE', 'FALSE').upper() == 'TRUE': return False manifest = {} async with app['init_http_client'].get( '{}/App/{}'.format( app['settings'].APP_INIT_URL, app['settings'].APP_ID) ) as resp: if resp.status == 200: manifest = await resp.json() if 'meta' in manifest: del manifest['meta'] if not manifest or app['sdk'].build_manifest() != manifest: logger.info('Fast start failed due to new manifest') return False client = None async with app['init_http_client'].get( '{}/Client/{}'.format( app['settings'].APP_INIT_URL, app['settings'].APP_ID) ) as resp: if resp.status == 200: client = await resp.json() if not client: logger.info('Fast start failed due to absence of app client') return False config = fake_config(app['settings'], client) await app['sdk'].initialize(config) return True async def on_startup(app): basic_auth = BasicAuth( login=app['settings'].APP_INIT_CLIENT_ID, password=app['settings'].APP_INIT_CLIENT_SECRET ) app['init_http_client'] = ClientSession(auth=basic_auth) if not await fast_start(app): asyncio.get_event_loop().create_task(wait_and_init_aidbox(app)) async def on_cleanup(app): await app['init_http_client'].close() await app['sdk'].deinitialize() async def on_shutdown(app): if not app['init_http_client'].closed: await app['init_http_client'].close() async def create_app(settings, sdk, debug=False): app = web.Application() app.on_startup.append(on_startup) app.on_cleanup.append(on_cleanup) app.on_shutdown.append(on_shutdown) app.update( name='aidbox-python-sdk', settings=settings, sdk=sdk, init_aidbox_app=init_aidbox, livereload=True ) setup_routes(app) return app
[]
[]
[ "APP_FAST_START_MODE" ]
[]
["APP_FAST_START_MODE"]
python
1
0
application.py
import os import re from flask import Flask, jsonify, render_template, request, url_for from flask_jsglue import JSGlue from cs50 import SQL from helpers import lookup # configure application app = Flask(__name__) JSGlue(app) # ensure responses aren't cached if app.config["DEBUG"]: @app.after_request def after_request(response): response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate" response.headers["Expires"] = 0 response.headers["Pragma"] = "no-cache" return response # configure CS50 Library to use SQLite database db = SQL("sqlite:///mashup.db") @app.route("/") def index(): """Render map.""" os.environ["API_KEY"] = "{Insert API_Key here}" if not os.environ.get("API_KEY"): raise RuntimeError("API_KEY not set") return render_template("index.html", key=os.environ.get("API_KEY")) @app.route("/articles") def articles(): """Look up articles for geo.""" # retrieve a valid geo argument from HTML form geo = request.args.get("geo") if not geo: raise RuntimeError("geo not set") # search for articles via the lookup function articles = lookup(geo) # return 5 such articles as JSON objects if len(articles) > 5: return jsonify([articles[0], articles[1], articles[2], articles[3], articles[4]]) else: return jsonify(articles) @app.route("/search") def search(): """Search for places that match query.""" #retrieve q and SQL's "wildcard" character from HTML form q = request.args.get("q") + "%" # search the database for values matching user input place = db.execute("SELECT * FROM places WHERE postal_code LIKE :q OR \ place_name LIKE :q OR admin_name1 LIKE :q", q=q) # return 10 such place as JSON objects if len(place) > 10: return jsonify([place[0], place[1], place[2], place[3], place[4], place[5], place[6], place[7], place[8], place[9]]) else: return jsonify(place) @app.route("/update") def update(): """Find up to 10 places within view.""" # ensure parameters are present if not request.args.get("sw"): raise RuntimeError("missing sw") if not request.args.get("ne"): raise RuntimeError("missing ne") # ensure parameters are in lat,lng format if not re.search("^-?\d+(?:\.\d+)?,-?\d+(?:\.\d+)?$", request.args.get("sw")): raise RuntimeError("invalid sw") if not re.search("^-?\d+(?:\.\d+)?,-?\d+(?:\.\d+)?$", request.args.get("ne")): raise RuntimeError("invalid ne") # explode southwest corner into two variables (sw_lat, sw_lng) = [float(s) for s in request.args.get("sw").split(",")] # explode northeast corner into two variables (ne_lat, ne_lng) = [float(s) for s in request.args.get("ne").split(",")] # find 10 cities within view, pseudorandomly chosen if more within view if (sw_lng <= ne_lng): # doesn't cross the antimeridian rows = db.execute("""SELECT * FROM places WHERE :sw_lat <= latitude AND latitude <= :ne_lat AND (:sw_lng <= longitude AND longitude <= :ne_lng) GROUP BY country_code, place_name, admin_code1 ORDER BY RANDOM() LIMIT 10""", sw_lat=sw_lat, ne_lat=ne_lat, sw_lng=sw_lng, ne_lng=ne_lng) else: # crosses the antimeridian rows = db.execute("""SELECT * FROM places WHERE :sw_lat <= latitude AND latitude <= :ne_lat AND (:sw_lng <= longitude OR longitude <= :ne_lng) GROUP BY country_code, place_name, admin_code1 ORDER BY RANDOM() LIMIT 10""", sw_lat=sw_lat, ne_lat=ne_lat, sw_lng=sw_lng, ne_lng=ne_lng) # output places as JSON return jsonify(rows)
[]
[]
[ "API_KEY" ]
[]
["API_KEY"]
python
1
0
src/experiments.py
"""Main script""" import os import pandas as pd import numpy as np from tabulate import tabulate from cenotaph.basics.base_classes import Ensemble from cenotaph.classification.one_class import EllipticEnvelope, NND, SVM from cenotaph.colour.colour_descriptors import FullHist, MarginalHists from cenotaph.texture.hep.greyscale import ILBP, LBP from cenotaph.texture.hep.colour import OCLBP, IOCLBP from cenotaph.texture.filtering import Gabor from cenotaph.cnn import DenseNet121, MobileNet, ResNet50, VGG16, Xception from functions import get_accuracy #This is to avoid memory errors with convnets os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true' #Base folder for the image datasets data_folder = '../data/images' #Cache folder where to store image features and splits cache_folder = '../cache' feature_cache = f'{cache_folder}/features' hep_luts = f'{cache_folder}/hep-luts' #Cache folder where to store the classification results classification_cache = f'{cache_folder}/classification' #Folder where to store the results ion LaTeX form latex_folder = 'LaTeX' #Fraction of normal samples used for training the classifier train_ratio = 0.5 #Number of train/test splits num_splits = 50 #Cache folder where to store the train/test splits splits_cache = f'{cache_folder}/splits/{num_splits}' #Create the cache folders if they do not exist dirs = [classification_cache, feature_cache, splits_cache, latex_folder, hep_luts] for dir_ in dirs: if not os.path.isdir(dir_): os.makedirs(dir_) #Common settings for LBP-like descriptors lbp_common_settings = {'num_peripheral_points': 8, 'group_action': 'C', 'cache_folder': hep_luts} traditional_descriptors =\ {'FullColHist': FullHist(nbins = 10), 'MargColHists': MarginalHists(nbins = (256, 256, 256)), 'Gabor': Gabor(size = 6), 'LBP': Ensemble(image_descriptors= [LBP(radius=1, **lbp_common_settings), LBP(radius=2, **lbp_common_settings), LBP(radius=3, **lbp_common_settings)]), 'ILBP': Ensemble(image_descriptors= [ILBP(radius=1, **lbp_common_settings), ILBP(radius=2, **lbp_common_settings), ILBP(radius=3, **lbp_common_settings)]), 'OCLBP': Ensemble(image_descriptors= [OCLBP(radius=1, **lbp_common_settings), OCLBP(radius=2, **lbp_common_settings), OCLBP(radius=3, **lbp_common_settings)]), 'IOCLBP': Ensemble(image_descriptors= [IOCLBP(radius=1, **lbp_common_settings), IOCLBP(radius=2, **lbp_common_settings), IOCLBP(radius=3, **lbp_common_settings)]) } cnns = {'DenseNet-121': DenseNet121(), 'MobileNet': MobileNet(), 'VGG16': VGG16(), 'ResNet-50': ResNet50(), 'Xception': Xception()} descriptors = {**traditional_descriptors, **cnns} classifiers = {'3-NN': NND(k = 3)} datasets = ['Carpet-01', 'Concrete-01', 'Fabric-01', 'Fabric-02', 'Layered-01', 'Leather-01', 'Paper-01', 'Paper-02', 'Wood-01'] for classifier_name, classifier in classifiers.items(): df = pd.DataFrame() for dataset in datasets: source_folder = f'{data_folder}/{dataset}' record = dict() for descriptor_name, descriptor in descriptors.items(): accuracy =\ get_accuracy(descriptor = descriptor, descriptor_name = descriptor_name, classifier = classifier, classifier_name = classifier_name, dataset_folder = source_folder, dataset_name = dataset, feature_cache = feature_cache, classification_cache = classification_cache, splits_cache = splits_cache, train_ratio = train_ratio, num_splits = num_splits) avg_acc = 100*np.mean(accuracy) print(f'Avg accuracy of {descriptor_name}/{classifier_name} on ' f'{dataset} = {avg_acc:4.2f}') record.update({'Feature': descriptor_name, 'Dataset': dataset, 'Accuracy (mean)': avg_acc}) df = 
df.append(record, ignore_index=True) print(f'Classifier: {classifier_name}') print(tabulate(df)) #---------- Store the results in a LaTeX table ---------- latex_dest = f'{latex_folder}/{classifier_name}.tex' with open(latex_dest, 'w') as fp: #Header cols = (['c']*(len(datasets) + 1)) cols = ''.join(cols) fp.write(f'\\begin{{tabular}}{{{cols}}}\n') fp.write(f'\\toprule\n') fp.write(f'& \\multicolumn{{{len(datasets)}}}{{c}}{{Datasets}}\\\\') fp.write('Descriptor') offset = ord('A') str_ = str() #for d, _ in enumerate(datasets): #fp.write(f' & {chr(offset + d)}') for dataset in datasets: fp.write(f' & \\rotatebox{{90}}{{{dataset}}}') fp.write('\\\\\n') fp.write(f'\\midrule\n') #Records for descriptor_name in descriptors.keys(): fp.write(f'{descriptor_name}') for dataset in datasets: acc = df.loc[(df['Feature'] == descriptor_name) & (df['Dataset'] == dataset)]['Accuracy (mean)'] acc = acc.tolist()[0] fp.write(f' & {acc:3.1f}') fp.write('\\\\\n') #Footer fp.write(f'\\bottomrule\n') fp.write(f'\\end{{tabular}}\n') #--------------------------------------------------------
[]
[]
[ "TF_FORCE_GPU_ALLOW_GROWTH" ]
[]
["TF_FORCE_GPU_ALLOW_GROWTH"]
python
1
0
vimeoct/asgi.py
""" ASGI config for vimeoct project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'vimeoct.settings') application = get_asgi_application()
[]
[]
[]
[]
[]
python
0
0
vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package leaderelection implements leader election of a set of endpoints. // It uses an annotation in the endpoints object to store the record of the // election state. This implementation does not guarantee that only one // client is acting as a leader (a.k.a. fencing). // // A client only acts on timestamps captured locally to infer the state of the // leader election. The client does not consider timestamps in the leader // election record to be accurate because these timestamps may not have been // produced by a local clock. The implemention does not depend on their // accuracy and only uses their change to indicate that another client has // renewed the leader lease. Thus the implementation is tolerant to arbitrary // clock skew, but is not tolerant to arbitrary clock skew rate. // // However the level of tolerance to skew rate can be configured by setting // RenewDeadline and LeaseDuration appropriately. The tolerance expressed as a // maximum tolerated ratio of time passed on the fastest node to time passed on // the slowest node can be approximately achieved with a configuration that sets // the same ratio of LeaseDuration to RenewDeadline. For example if a user wanted // to tolerate some nodes progressing forward in time twice as fast as other nodes, // the user could set LeaseDuration to 60 seconds and RenewDeadline to 30 seconds. // // While not required, some method of clock synchronization between nodes in the // cluster is highly recommended. It's important to keep in mind when configuring // this client that the tolerance to skew rate varies inversely to master // availability. // // Larger clusters often have a more lenient SLA for API latency. This should be // taken into account when configuring the client. The rate of leader transitions // should be monitored and RetryPeriod and LeaseDuration should be increased // until the rate is stable and acceptably low. It's important to keep in mind // when configuring this client that the tolerance to API latency varies inversely // to master availability. // // DISCLAIMER: this is an alpha API. This library will likely change significantly // or even be removed entirely in subsequent releases. Depend on this API at // your own risk. 
package leaderelection import ( "bytes" "context" "fmt" "time" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" rl "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/klog/v2" ) const ( JitterFactor = 1.2 ) // NewLeaderElector creates a LeaderElector from a LeaderElectionConfig func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) { if lec.LeaseDuration <= lec.RenewDeadline { return nil, fmt.Errorf("leaseDuration must be greater than renewDeadline") } if lec.RenewDeadline <= time.Duration(JitterFactor*float64(lec.RetryPeriod)) { return nil, fmt.Errorf("renewDeadline must be greater than retryPeriod*JitterFactor") } if lec.LeaseDuration < 1 { return nil, fmt.Errorf("leaseDuration must be greater than zero") } if lec.RenewDeadline < 1 { return nil, fmt.Errorf("renewDeadline must be greater than zero") } if lec.RetryPeriod < 1 { return nil, fmt.Errorf("retryPeriod must be greater than zero") } if lec.Callbacks.OnStartedLeading == nil { return nil, fmt.Errorf("OnStartedLeading callback must not be nil") } if lec.Callbacks.OnStoppedLeading == nil { return nil, fmt.Errorf("OnStoppedLeading callback must not be nil") } if lec.Lock == nil { return nil, fmt.Errorf("Lock must not be nil.") } le := LeaderElector{ config: lec, clock: clock.RealClock{}, metrics: globalMetricsFactory.newLeaderMetrics(), } le.metrics.leaderOff(le.config.Name) return &le, nil } type LeaderElectionConfig struct { // Lock is the resource that will be used for locking Lock rl.Interface // LeaseDuration is the duration that non-leader candidates will // wait to force acquire leadership. This is measured against time of // last observed ack. // // A client needs to wait a full LeaseDuration without observing a change to // the record before it can attempt to take over. When all clients are // shutdown and a new set of clients are started with different names against // the same leader record, they must wait the full LeaseDuration before // attempting to acquire the lease. Thus LeaseDuration should be as short as // possible (within your tolerance for clock skew rate) to avoid a possible // long waits in the scenario. // // Core clients default this value to 15 seconds. LeaseDuration time.Duration // RenewDeadline is the duration that the acting master will retry // refreshing leadership before giving up. // // Core clients default this value to 10 seconds. RenewDeadline time.Duration // RetryPeriod is the duration the LeaderElector clients should wait // between tries of actions. // // Core clients default this value to 2 seconds. RetryPeriod time.Duration // Callbacks are callbacks that are triggered during certain lifecycle // events of the LeaderElector Callbacks LeaderCallbacks // WatchDog is the associated health checker // WatchDog may be null if its not needed/configured. WatchDog *HealthzAdaptor // ReleaseOnCancel should be set true if the lock should be released // when the run context is cancelled. If you set this to true, you must // ensure all code guarded by this lease has successfully completed // prior to cancelling the context, or you may have two processes // simultaneously acting on the critical path. ReleaseOnCancel bool // Name is the name of the resource lock for debugging Name string } // LeaderCallbacks are callbacks that are triggered during certain // lifecycle events of the LeaderElector. These are invoked asynchronously. 
// // possible future callbacks: // * OnChallenge() type LeaderCallbacks struct { // OnStartedLeading is called when a LeaderElector client starts leading OnStartedLeading func(context.Context) // OnStoppedLeading is called when a LeaderElector client stops leading OnStoppedLeading func() // OnNewLeader is called when the client observes a leader that is // not the previously observed leader. This includes the first observed // leader when the client starts. OnNewLeader func(identity string) } // LeaderElector is a leader election client. type LeaderElector struct { config LeaderElectionConfig // internal bookkeeping observedRecord rl.LeaderElectionRecord observedRawRecord []byte observedTime time.Time // used to implement OnNewLeader(), may lag slightly from the // value observedRecord.HolderIdentity if the transition has // not yet been reported. reportedLeader string // clock is wrapper around time to allow for less flaky testing clock clock.Clock metrics leaderMetricsAdapter // name is the name of the resource lock for debugging name string } // Run starts the leader election loop func (le *LeaderElector) Run(ctx context.Context) { defer runtime.HandleCrash() defer func() { le.config.Callbacks.OnStoppedLeading() }() if !le.acquire(ctx) { return // ctx signalled done } ctx, cancel := context.WithCancel(ctx) defer cancel() go le.config.Callbacks.OnStartedLeading(ctx) le.renew(ctx) } // RunOrDie starts a client with the provided config or panics if the config // fails to validate. func RunOrDie(ctx context.Context, lec LeaderElectionConfig) { le, err := NewLeaderElector(lec) if err != nil { panic(err) } if lec.WatchDog != nil { lec.WatchDog.SetLeaderElection(le) } le.Run(ctx) } // GetLeader returns the identity of the last observed leader or returns the empty string if // no leader has yet been observed. func (le *LeaderElector) GetLeader() string { return le.observedRecord.HolderIdentity } // IsLeader returns true if the last observed leader was this client else returns false. func (le *LeaderElector) IsLeader() bool { return le.observedRecord.HolderIdentity == le.config.Lock.Identity() } // acquire loops calling tryAcquireOrRenew and returns true immediately when tryAcquireOrRenew succeeds. // Returns false if ctx signals done. func (le *LeaderElector) acquire(ctx context.Context) bool { ctx, cancel := context.WithCancel(ctx) defer cancel() succeeded := false desc := le.config.Lock.Describe() klog.Infof("attempting to acquire leader lease %v...", desc) wait.JitterUntil(func() { succeeded = le.tryAcquireOrRenew(ctx) le.maybeReportTransition() if !succeeded { klog.V(4).Infof("failed to acquire lease %v", desc) return } le.config.Lock.RecordEvent("became leader") le.metrics.leaderOn(le.config.Name) klog.Infof("successfully acquired lease %v", desc) cancel() }, le.config.RetryPeriod, JitterFactor, true, ctx.Done()) return succeeded } // renew loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew fails or ctx signals done. 
func (le *LeaderElector) renew(ctx context.Context) { ctx, cancel := context.WithCancel(ctx) defer cancel() wait.Until(func() { timeoutCtx, timeoutCancel := context.WithTimeout(ctx, le.config.RenewDeadline) defer timeoutCancel() err := wait.PollImmediateUntil(le.config.RetryPeriod, func() (bool, error) { return le.tryAcquireOrRenew(timeoutCtx), nil }, timeoutCtx.Done()) le.maybeReportTransition() desc := le.config.Lock.Describe() if err == nil { klog.V(5).Infof("successfully renewed lease %v", desc) return } le.config.Lock.RecordEvent("stopped leading") le.metrics.leaderOff(le.config.Name) klog.Infof("failed to renew lease %v: %v", desc, err) cancel() }, le.config.RetryPeriod, ctx.Done()) // if we hold the lease, give it up if le.config.ReleaseOnCancel { le.release() } } // release attempts to release the leader lease if we have acquired it. func (le *LeaderElector) release() bool { if !le.IsLeader() { return true } leaderElectionRecord := rl.LeaderElectionRecord{ LeaderTransitions: le.observedRecord.LeaderTransitions, } if err := le.config.Lock.Update(context.TODO(), leaderElectionRecord); err != nil { klog.Errorf("Failed to release lock: %v", err) return false } le.observedRecord = leaderElectionRecord le.observedTime = le.clock.Now() return true } // tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired, // else it tries to renew the lease if it has already been acquired. Returns true // on success else returns false. func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool { now := metav1.Now() leaderElectionRecord := rl.LeaderElectionRecord{ HolderIdentity: le.config.Lock.Identity(), LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second), RenewTime: now, AcquireTime: now, } // 1. obtain or create the ElectionRecord oldLeaderElectionRecord, oldLeaderElectionRawRecord, err := le.config.Lock.Get(ctx) if err != nil { if !errors.IsNotFound(err) { klog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err) return false } if err = le.config.Lock.Create(ctx, leaderElectionRecord); err != nil { klog.Errorf("error initially creating leader election record: %v", err) return false } le.observedRecord = leaderElectionRecord le.observedTime = le.clock.Now() return true } // 2. Record obtained, check the Identity & Time if !bytes.Equal(le.observedRawRecord, oldLeaderElectionRawRecord) { le.observedRecord = *oldLeaderElectionRecord le.observedRawRecord = oldLeaderElectionRawRecord le.observedTime = le.clock.Now() } if len(oldLeaderElectionRecord.HolderIdentity) > 0 && le.observedTime.Add(le.config.LeaseDuration).After(now.Time) && !le.IsLeader() { klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity) return false } // 3. We're going to try to update. The leaderElectionRecord is set to it's default // here. Let's correct it before updating. 
if le.IsLeader() { leaderElectionRecord.AcquireTime = oldLeaderElectionRecord.AcquireTime leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions } else { leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + 1 } // update the lock itself if err = le.config.Lock.Update(ctx, leaderElectionRecord); err != nil { klog.Errorf("Failed to update lock: %v", err) return false } le.observedRecord = leaderElectionRecord le.observedTime = le.clock.Now() return true } func (le *LeaderElector) maybeReportTransition() { if le.observedRecord.HolderIdentity == le.reportedLeader { return } le.reportedLeader = le.observedRecord.HolderIdentity if le.config.Callbacks.OnNewLeader != nil { go le.config.Callbacks.OnNewLeader(le.reportedLeader) } } // Check will determine if the current lease is expired by more than timeout. func (le *LeaderElector) Check(maxTolerableExpiredLease time.Duration) error { if !le.IsLeader() { // Currently not concerned with the case that we are hot standby return nil } // If we are more than timeout seconds after the lease duration that is past the timeout // on the lease renew. Time to start reporting ourselves as unhealthy. We should have // died but conditions like deadlock can prevent this. (See #70819) if le.clock.Since(le.observedTime) > le.config.LeaseDuration+maxTolerableExpiredLease { return fmt.Errorf("failed election to renew leadership on lease %s", le.config.Name) } return nil }
[]
[]
[]
[]
[]
go
null
null
null
internal/app/cli/command/create.go
//go:generate go run ../generate/stub_generator.go package command import ( "fmt" "log" "os" "os/exec" "path/filepath" "runtime" "strings" "github.com/TIBCOSoftware/mashling/internal/app/cli/assets" "github.com/TIBCOSoftware/mashling/internal/app/version" "github.com/TIBCOSoftware/mashling/internal/pkg/grpcsupport" gwerrors "github.com/TIBCOSoftware/mashling/internal/pkg/model/errors" "github.com/TIBCOSoftware/mashling/pkg/files" "github.com/TIBCOSoftware/mashling/pkg/strings" "github.com/spf13/cobra" ) const ( // ImportPath is the root import path regardless of location. ImportPath = "github.com/TIBCOSoftware/mashling" // DockerImage is the Docker image used to run the creation process. DockerImage = "mashling/mashling-compile:0.4.0" ) func init() { createCommand.Flags().StringVarP(&name, "name", "n", "mashling-custom", "customized mashling-gateway name") createCommand.Flags().StringVarP(&protoPath, "protoPath", "p", "", "path to proto file for grpc service") createCommand.Flags().BoolVarP(&native, "native", "N", false, "build the customized binary natively instead of using Docker") createCommand.Flags().StringVarP(&targetOS, "os", "O", "", "target OS to build for (default is the host OS, valid values are windows, darwin, and linux)") createCommand.Flags().StringVarP(&targetArch, "arch", "A", "", "target architecture to build for (default is amd64, arm64 is only compatible with Linux)") cliCommand.AddCommand(createCommand) } var ( protoPath string name string native bool targetOS string targetArch string supportedTargetOS = map[string]bool{"windows": true, "darwin": true, "linux": true} supportedTargetArch = map[string]bool{"amd64": true, "arm64": true} ) var createCommand = &cobra.Command{ Use: "create", Short: "Creates a customized mashling-gateway", Long: `Create a reusable customized mashling-gateway binary based off of the dependencies listed in your mashling.json configuration file`, Run: create, } // Create builds a custom mashling-gateway project directory populated with // dependencies listed in the provided Mashling config file. func create(command *cobra.Command, args []string) { var deps []string err := loadGateway() if err != nil { // Attempt to remedy any errors found, particularly missing dependencies. if gateway == nil { log.Fatal(err) } for _, errd := range gateway.Errors() { switch e := errd.(type) { case *gwerrors.MissingDependency: log.Println("Missing dependencies found: ", strings.Join(e.MissingDependencies, " ")) deps = append(deps, e.MissingDependencies...) 
case *gwerrors.UndefinedReference: log.Fatalf("%s: %s", e.Type(), e.Details()) default: log.Fatalf("Do not know how to handle error type %T!\n", e) } } } pwd, err := os.Getwd() if err != nil { log.Fatal(err) } name = filepath.Join(pwd, name) fullPathName := filepath.Join(name, "src", ImportPath) Env := os.Environ() Env = append(Env, "GOPATH="+name) Env = append(Env, "PATH="+os.Getenv("PATH")+":"+filepath.Join(name, "bin")) if targetOS == "" { targetOS = runtime.GOOS } if targetArch == "" { targetArch = "amd64" } if _, ok := supportedTargetOS[targetOS]; !ok { log.Fatal("invalid target OS type specified") } if _, ok := supportedTargetArch[targetArch]; !ok { log.Fatal("invalid target architecture type specified") } if targetArch == "arm64" && targetOS != "linux" { log.Fatal("arm64 architecture is only valid with linux") } if _, err = os.Stat(fullPathName); os.IsNotExist(err) { err = os.MkdirAll(fullPathName, 0755) if err != nil { log.Fatal(err) } } if _, err = os.Stat(filepath.Join(name, "bin")); os.IsNotExist(err) { err = os.MkdirAll(filepath.Join(name, "bin"), 0755) if err != nil { log.Fatal(err) } } stub, err := assets.Asset("stub.zip") if err != nil { log.Fatal(err) } err = files.UnpackBytes(stub, fullPathName) if err != nil { log.Fatal(err) } // Write version to .version file vFile, err := os.OpenFile(filepath.Join(fullPathName, ".version"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755) if err != nil { log.Fatal(err) } defer vFile.Close() if _, err = vFile.WriteString(version.Version); err != nil { log.Fatal(err) } //grpc Support code gRPCFlag := false if len(protoPath) != 0 { gRPCFlag = true } if gRPCFlag { log.Println("Generating grpc support files using proto file: ", protoPath) grpcsupport.AssignValues(name) err := grpcsupport.GenerateSupportFiles(protoPath) if err != nil { log.Fatal(err) } } //grpc Support code end var cmd *exec.Cmd var dockerCmd, dockerContainerID string if dockerCmd, err = exec.LookPath("docker"); native || err != nil { // Docker does not exist, try native toolchain. log.Println("Docker not found or native option specified, using go natively...") dockerCmd = "" } else { log.Println("Docker found, using it to build...") cmd = exec.Command(dockerCmd, "run", "--rm", "-d", "-t", DockerImage) cmd.Dir = name cmd.Env = Env output, cErr := cmd.Output() if cErr != nil { log.Println(string(output)) log.Fatal(cErr) } dockerContainerID = strings.TrimSpace(string(output)) defer func() { log.Println("Stopping container: ", dockerContainerID) // Stop running container. cmd = exec.Command(dockerCmd, "stop", dockerContainerID) cmd.Dir = name cmd.Env = Env output, cErr = cmd.CombinedOutput() if cErr != nil { log.Println(string(output)) log.Fatal(cErr) } }() log.Println("Copying default source code into container:", dockerContainerID) // Copy default source into container. 
cmd = exec.Command(dockerCmd, "cp", name+"/.", dockerContainerID+":/mashling/") cmd.Dir = name cmd.Env = Env output, cErr = cmd.CombinedOutput() if cErr != nil { log.Println(string(output)) log.Fatal(cErr) } } // Setup environment log.Println("Setting up project...") if dockerCmd != "" { cmd = exec.Command(dockerCmd, "exec", dockerContainerID, "/bin/bash", "-c", "go run build.go setup") } else { cmd = exec.Command("go", "run", "build.go", "setup") } cmd.Dir = fullPathName cmd.Env = Env output, cErr := cmd.CombinedOutput() if cErr != nil { log.Println(string(output)) log.Fatal(cErr) } // Run dep add for all identified new dependencies if len(deps) > 0 { // Turn deps into a string log.Println("Installing missing dependencies...") depString := strings.Join(util.UniqueStrings(deps), " ") if dockerCmd != "" { cmd = exec.Command(dockerCmd, "exec", dockerContainerID, "/bin/bash", "-c", "dep ensure -add "+depString) } else { cmd = exec.Command("dep", "ensure", "-add", depString) } cmd.Dir = fullPathName cmd.Env = Env output, cErr = cmd.CombinedOutput() if cErr != nil { log.Println(string(output)) log.Fatal(cErr) } } // Run make targets to generate appropriate code log.Println("Generating assets for customized Mashling...") if dockerCmd != "" { cmd = exec.Command(dockerCmd, "exec", dockerContainerID, "/bin/bash", "-c", "go run build.go allgatewayprep") } else { cmd = exec.Command("go", "run", "build.go", "allgatewayprep") } cmd.Dir = fullPathName cmd.Env = Env output, cErr = cmd.CombinedOutput() if cErr != nil { log.Println(string(output)) log.Fatal(cErr) } // Run make build target to build for appropriate OS log.Println("Building customized Mashling binary...") if dockerCmd != "" { cmd = exec.Command(dockerCmd, "exec", dockerContainerID, "/bin/bash", "-c", fmt.Sprintf("go run build.go releasegateway -os=%s -arch=%s", targetOS, targetArch)) } else { cmd = exec.Command("go", "run", "build.go", "releasegateway", "-os="+targetOS, "-arch="+targetArch) } cmd.Dir = fullPathName cmd.Env = Env output, cErr = cmd.CombinedOutput() if cErr != nil { log.Println(string(output)) log.Fatal(cErr) } if dockerCmd != "" { log.Println("Copying out created source code and binary from container...") // Copy out created source directory from running container. cmd = exec.Command(dockerCmd, "cp", dockerContainerID+":/mashling/src/"+ImportPath+"/.", filepath.Join(name, "src", ImportPath)) cmd.Dir = name cmd.Env = Env output, cErr = cmd.CombinedOutput() if cErr != nil { log.Println(string(output)) log.Fatal(cErr) } } // Copy release folder contents to top level err = filepath.Walk(filepath.Join(name, "src", ImportPath, "release"), func(path string, info os.FileInfo, err error) error { if !info.IsDir() { err = files.CopyFile(path, filepath.Join(name, info.Name())) if err != nil { log.Fatal(err) } } return nil }) if err != nil { log.Fatal(err) } }
[ "\"PATH\"" ]
[]
[ "PATH" ]
[]
["PATH"]
go
1
0
salesforce/testrunner/settings.py
# Django settings for testrunner project. import os DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', '[email protected]'), ) PERSON_ACCOUNT_ACTIVATED = False DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'salesforce_testrunner_db', }, # The variable DATABASES should be redefined in local_settings with details # in order to protect private secret values from unintentional committing. 'salesforce': { 'ENGINE': 'salesforce.backend', "CONSUMER_KEY": os.environ.get('SF_CONSUMER_KEY', ''), "CONSUMER_SECRET": os.environ.get('SF_CONSUMER_SECRET', ''), 'USER': os.environ.get('SF_USER', ''), 'PASSWORD': os.environ.get('SF_PASSWORD', ''), 'HOST': 'https://login.salesforce.com', } } # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/New_York' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale USE_L10N = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = '' # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. 
SECRET_KEY = '6$y&o(28l)#o1_2rafojb_&zxi*jnivkv)ygj#!01kt0ypsxe$' SITE_ID = 1 MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'salesforce.testrunner.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.admin', 'django.contrib.admindocs', 'salesforce', 'salesforce.testrunner.example', 'salesforce.testrunner.dynamic_models', ) SALESFORCE_DB_ALIAS = 'salesforce' SALESFORCE_QUERY_TIMEOUT = 15 # Maximal number of retries after timeout. # REQUESTS_MAX_RETRIES = 1 DATABASE_ROUTERS = [ "salesforce.router.ModelRouter" ] # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'handlers': { "console": { "class": "logging.StreamHandler", "level": "DEBUG", }, 'mail_admins': { 'level': 'ERROR', 'class': 'django.utils.log.AdminEmailHandler', 'filters': ['require_debug_false'], } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, 'salesforce': { 'handlers': ['console'], 'level': 'INFO', 'propagate': True, }, 'salesforce.testrunner': { 'handlers': ['console'], 'level': 'INFO', 'propagate': True, }, }, 'filters': { 'require_debug_false': { "()": "django.utils.log.RequireDebugFalse", } } } # Preventive workaround for some problems with IPv6 by restricting DNS queries # in the Python process only to IPv4, until the support by SFDC become stable. # SFDC enabled IPv6 for a week in March 2014. It caused long delays somewhere. IPV4_ONLY = True # Name of primary key - by default 'id'. The value 'Id' was the default for # version "django-salesforce < 0.5". # SF_PK = 'Id' try: from salesforce.testrunner.local_settings import * # NOQA except ImportError: pass
[]
[]
[ "SF_CONSUMER_SECRET", "SF_CONSUMER_KEY", "SF_PASSWORD", "SF_USER" ]
[]
["SF_CONSUMER_SECRET", "SF_CONSUMER_KEY", "SF_PASSWORD", "SF_USER"]
python
4
0
parser.py
#!/usr/bin/env python #-*- coding: utf-8 -*- """ Script to fetch the daily news from the NHK News Easy website, clean it, upload it to S3 and then submit it to Instapaper. """ import io, os, sys import time import requests import boto from pyquery import PyQuery as pq BUCKET_NAME = os.environ.get("BUCKET_NAME", "sample-bucket") BUCKET_PREFIX = os.environ.get("BUCKET_PREFIX", "sample-prefix") INSTAPAPER_USERNAME = os.environ.get("INSTAPAPER_USERNAME", "user") INSTAPAPER_PASSWORD = os.environ.get("INSTAPAPER_PASSWORD", "password") """ Stream from stdin 'rt' mode = unicode text, 'rb' = binary stream 'rt' mode is line buffered, 'rb' use a smart buffer """ def process_stdin(handler, mode='rt'): sys.stdin = io.open(sys.stdin.fileno(), mode) for chunk in sys.stdin: handler(chunk) """ Process stdin stream line by line """ def read_handler(data): url = data.strip('\n') # Remove the EOL r = requests.get(url) l = [] today = time.strftime('%Y-%m-%d') if len(r.json()) > 0: l = r.json()[0].get(today, []) if not l: print "No news for today." exit(0) for i, item in enumerate(l): if item.get("news_id"): print i, item.get("title", "No Title") print " => ", generate_content_url(url, item["news_id"]) content = fetch_item(generate_content_url(url, item["news_id"])) result = put_item(today, item["news_id"], content) print " Saved at ", result push_to_instapaper(result, item.get("title", None)) """ Find the URL of the content given its base URL and content identifier """ def generate_content_url(base_url, content_id): content_url = base_url.rsplit('/', 1)[0] content_path = "%s/%s.html" % (content_id, content_id) content_url = '/'.join([content_url, content_path]) return content_url """ Download the article """ def fetch_item(url): r = requests.get(url) r.encoding = "utf-8" content = prettify(r.text) return content """ Remove furigana + random elements """ def prettify(html): html = pq(html) html("#main rt").empty() # Remove furigana html("ruby").each(lambda i, e: pq(e).replaceWith(pq(e).text())) # Unwrap ruby html("#main span").each(lambda i, e: pq(e).replaceWith(pq(e).text())) # Unwrap spans html("#main a").each(lambda i, e: pq(e).replaceWith(pq(e).text())) # Unwrap links html("#soundkana").empty() # Remove sound article = html("#main") html("body").empty().append(article) # Put the content in the body return html.html() """ Upload the content on S3 """ def put_item(date, uid, content): from boto.s3.key import Key try: c = boto.connect_s3() b = c.get_bucket(BUCKET_NAME) k = Key(b) k.key = "%s/%s/%s.html" % (BUCKET_PREFIX, date, uid) k.content_type = "text/html" k.set_contents_from_string(content) #print "Stored at ", k.key return k.generate_url(3600*732, force_http=True) except Exception as e: print "S3 Upload Failed: ", e """ Push URL to Instapaper """ def push_to_instapaper(url, title=None): r = requests.get("https://www.instapaper.com/api/add", auth=(INSTAPAPER_USERNAME, INSTAPAPER_PASSWORD), params={'url': url, 'title': title}) if r.status_code != 201: print "HTTP %i: Failed to push to Instapaper" % r.status_code def main(): process_stdin(read_handler) if __name__ == "__main__": try: main() except KeyboardInterrupt: exit('')
[]
[]
[ "INSTAPAPER_USERNAME", "BUCKET_PREFIX", "INSTAPAPER_PASSWORD", "BUCKET_NAME" ]
[]
["INSTAPAPER_USERNAME", "BUCKET_PREFIX", "INSTAPAPER_PASSWORD", "BUCKET_NAME"]
python
4
0
backend/venv/lib/python3.9/site-packages/pip/_vendor/pep517/_in_process.py
"""This is invoked in a subprocess to call the build backend hooks. It expects: - Command line args: hook_name, control_dir - Environment variables: PEP517_BUILD_BACKEND=entry.point:spec PEP517_BACKEND_PATH=paths (separated with os.pathsep) - control_dir/input.json: - {"kwargs": {...}} Results: - control_dir/output.json - {"return_val": ...} """ import json import os import os.path import re import shutil import sys import traceback from glob import glob from importlib import import_module from os.path import join as pjoin # This file is run as a script, and `import compat` is not zip-safe, so we # include write_json() and read_json() from compat.py. # # Handle reading and writing JSON in UTF-8, on Python 3 and 2. if sys.version_info[0] >= 3: # Python 3 def write_json(obj, path, **kwargs): with open(path, 'w', encoding='utf-8') as f: json.dump(obj, f, **kwargs) def read_json(path): with open(path, 'r', encoding='utf-8') as f: return json.load(f) else: # Python 2 def write_json(obj, path, **kwargs): with open(path, 'wb') as f: json.dump(obj, f, encoding='utf-8', **kwargs) def read_json(path): with open(path, 'rb') as f: return json.load(f) class BackendUnavailable(Exception): """Raised if we cannot import the backend""" def __init__(self, traceback): self.traceback = traceback class BackendInvalid(Exception): """Raised if the backend is invalid""" def __init__(self, message): self.message = message class HookMissing(Exception): """Raised if a hook is missing and we are not executing the fallback""" def contained_in(filename, directory): """Test if a file is located within the given directory.""" filename = os.path.normcase(os.path.abspath(filename)) directory = os.path.normcase(os.path.abspath(directory)) return os.path.commonprefix([filename, directory]) == directory def _build_backend(): """Find and load the build backend""" # Add in-tree backend directories to the front of sys.path. backend_path = os.environ.get('PEP517_BACKEND_PATH') if backend_path: extra_pathitems = backend_path.split(os.pathsep) sys.path[:0] = extra_pathitems ep = os.environ['PEP517_BUILD_BACKEND'] mod_path, _, obj_path = ep.partition(':') try: obj = import_module(mod_path) except ImportError: raise BackendUnavailable(traceback.format_exc()) if backend_path: if not any( contained_in(obj.__file__, path) for path in extra_pathitems ): raise BackendInvalid("Backend was not loaded from backend-path") if obj_path: for path_part in obj_path.split('.'): obj = getattr(obj, path_part) return obj def get_requires_for_build_wheel(config_settings): """Invoke the optional get_requires_for_build_wheel hook Returns [] if the hook is not defined. """ backend = _build_backend() try: hook = backend.get_requires_for_build_wheel except AttributeError: return [] else: return hook(config_settings) def prepare_metadata_for_build_wheel( metadata_directory, config_settings, _allow_fallback): """Invoke optional prepare_metadata_for_build_wheel Implements a fallback by building a wheel if the hook isn't defined, unless _allow_fallback is False in which case HookMissing is raised. 
""" backend = _build_backend() try: hook = backend.prepare_metadata_for_build_wheel except AttributeError: if not _allow_fallback: raise HookMissing() return _get_wheel_metadata_from_wheel(backend, metadata_directory, config_settings) else: return hook(metadata_directory, config_settings) WHEEL_BUILT_MARKER = 'PEP517_ALREADY_BUILT_WHEEL' def _dist_info_files(whl_zip): """Identify the .dist-info folder inside a wheel ZipFile.""" res = [] for path in whl_zip.namelist(): m = re.match(r'[^/\\]+-[^/\\]+\.dist-info/', path) if m: res.append(path) if res: return res raise Exception("No .dist-info folder found in wheel") def _get_wheel_metadata_from_wheel( backend, metadata_directory, config_settings): """Build a wheel and extract the metadata from it. Fallback for when the build backend does not define the 'get_wheel_metadata' hook. """ from zipfile import ZipFile whl_basename = backend.build_wheel(metadata_directory, config_settings) with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), 'wb'): pass # Touch marker file whl_file = os.path.join(metadata_directory, whl_basename) with ZipFile(whl_file) as zipf: dist_info = _dist_info_files(zipf) zipf.extractall(path=metadata_directory, members=dist_info) return dist_info[0].split('/')[0] def _find_already_built_wheel(metadata_directory): """Check for a wheel already built during the get_wheel_metadata hook. """ if not metadata_directory: return None metadata_parent = os.path.dirname(metadata_directory) if not os.path.isfile(pjoin(metadata_parent, WHEEL_BUILT_MARKER)): return None whl_files = glob(os.path.join(metadata_parent, '*.whl')) if not whl_files: print('Found wheel built marker, but no .whl files') return None if len(whl_files) > 1: print('Found multiple .whl files; unspecified behaviour. ' 'Will call build_wheel.') return None # Exactly one .whl file return whl_files[0] def build_wheel(wheel_directory, config_settings, metadata_directory=None): """Invoke the mandatory build_wheel hook. If a wheel was already built in the prepare_metadata_for_build_wheel fallback, this will copy it rather than rebuilding the wheel. """ prebuilt_whl = _find_already_built_wheel(metadata_directory) if prebuilt_whl: shutil.copy2(prebuilt_whl, wheel_directory) return os.path.basename(prebuilt_whl) return _build_backend().build_wheel(wheel_directory, config_settings, metadata_directory) def get_requires_for_build_sdist(config_settings): """Invoke the optional get_requires_for_build_wheel hook Returns [] if the hook is not defined. 
""" backend = _build_backend() try: hook = backend.get_requires_for_build_sdist except AttributeError: return [] else: return hook(config_settings) class _DummyException(Exception): """Nothing should ever raise this exception""" class GotUnsupportedOperation(Exception): """For internal use when backend raises UnsupportedOperation""" def __init__(self, traceback): self.traceback = traceback def build_sdist(sdist_directory, config_settings): """Invoke the mandatory build_sdist hook.""" backend = _build_backend() try: return backend.build_sdist(sdist_directory, config_settings) except getattr(backend, 'UnsupportedOperation', _DummyException): raise GotUnsupportedOperation(traceback.format_exc()) HOOK_NAMES = { 'get_requires_for_build_wheel', 'prepare_metadata_for_build_wheel', 'build_wheel', 'get_requires_for_build_sdist', 'build_sdist', } def main(): if len(sys.argv) < 3: sys.exit("Needs args: hook_name, control_dir") hook_name = sys.argv[1] control_dir = sys.argv[2] if hook_name not in HOOK_NAMES: sys.exit("Unknown hook: %s" % hook_name) hook = globals()[hook_name] hook_input = read_json(pjoin(control_dir, 'input.json')) json_out = {'unsupported': False, 'return_val': None} try: json_out['return_val'] = hook(**hook_input['kwargs']) except BackendUnavailable as e: json_out['no_backend'] = True json_out['traceback'] = e.traceback except BackendInvalid as e: json_out['backend_invalid'] = True json_out['backend_error'] = e.message except GotUnsupportedOperation as e: json_out['unsupported'] = True json_out['traceback'] = e.traceback except HookMissing: json_out['hook_missing'] = True write_json(json_out, pjoin(control_dir, 'output.json'), indent=2) if __name__ == '__main__': main()
[]
[]
[ "PEP517_BUILD_BACKEND", "PEP517_BACKEND_PATH" ]
[]
["PEP517_BUILD_BACKEND", "PEP517_BACKEND_PATH"]
python
2
0
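The module docstring above fixes the whole contract: hook name and control directory on the command line, PEP517_BUILD_BACKEND (and optionally PEP517_BACKEND_PATH) in the environment, kwargs in control_dir/input.json, and the result in control_dir/output.json. A minimal front-end sketch under those assumptions; the path to _in_process.py and the setuptools backend name are illustrative, not taken from the row:

import json
import os
import subprocess
import sys
import tempfile

IN_PROCESS = "pip/_vendor/pep517/_in_process.py"  # illustrative path

with tempfile.TemporaryDirectory() as control_dir:
    # kwargs for the hook go into control_dir/input.json.
    with open(os.path.join(control_dir, "input.json"), "w", encoding="utf-8") as f:
        json.dump({"kwargs": {"config_settings": {}}}, f)

    env = dict(os.environ, PEP517_BUILD_BACKEND="setuptools.build_meta")
    # For in-tree backends, PEP517_BACKEND_PATH would hold os.pathsep-joined dirs.

    subprocess.run(
        [sys.executable, IN_PROCESS, "get_requires_for_build_wheel", control_dir],
        env=env, check=True)

    # The hook's return value comes back through output.json.
    with open(os.path.join(control_dir, "output.json"), encoding="utf-8") as f:
        print(json.load(f)["return_val"])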
controller.go
package main import ( "encoding/json" "fmt" "io/ioutil" "os" "path/filepath" "strings" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/strategicpatch" kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/rest" clientgocache "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/record" "k8s.io/klog" pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1" "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint" ) func kubeInit() *kubernetes.Clientset { var err error var config *rest.Config var kubeconfigFile string = os.Getenv("KUBECONFIG") if _, err = os.Stat(kubeconfigFile); err != nil { klog.V(3).Infof("kubeconfig %s failed to find due to %v", kubeconfigFile, err) config, err = rest.InClusterConfig() if err != nil { klog.Fatalf("Failed due to %v", err) } } else { config, err = clientcmd.BuildConfigFromFlags("", kubeconfigFile) if err != nil { klog.Fatalf("Failed due to %v", err) } } clientset, err := kubernetes.NewForConfig(config) if err != nil { klog.Fatalf("Failed due to %v", err) } return clientset } type controller struct { devicePlugin *NvidiaDevicePlugin clientset *kubernetes.Clientset // podLister can list/get pods from the shared informer's store. podLister corelisters.PodLister // recorder is an event recorder for recording Event resources to the // Kubernetes API. recorder record.EventRecorder // podInformerSynced returns true if the pod store has been synced at least once. podInformerSynced clientgocache.InformerSynced // podQueue is a rate limited work queue. This is used to queue work to be // processed instead of performing it as soon as a change happens. This // means we can ensure we only process a fixed amount of resources at a // time, and makes it easy to ensure we are never processing the same item // simultaneously in two different workers. //podQueue workqueue.RateLimitingInterface } func newController(dp *NvidiaDevicePlugin, kubeClient *kubernetes.Clientset, kubeInformerFactory kubeinformers.SharedInformerFactory, stopCh <-chan struct{}) (*controller, error) { klog.Info("Creating event broadcaster") eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "gpu-topo-device-plugin"}) c := &controller{ devicePlugin: dp, clientset: kubeClient, //podQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "podQueue"), recorder: recorder, } // Create pod informer. 
podInformer := kubeInformerFactory.Core().V1().Pods() podInformer.Informer().AddEventHandler(clientgocache.FilteringResourceEventHandler{ FilterFunc: func(obj interface{}) bool { switch t := obj.(type) { case *v1.Pod: if t.Spec.NodeName != dp.nodeName { return false } return IsGPUTopoPod(t) case clientgocache.DeletedFinalStateUnknown: if pod, ok := t.Obj.(*v1.Pod); ok { if pod.Spec.NodeName != dp.nodeName { return false } return IsGPUTopoPod(pod) } runtime.HandleError(fmt.Errorf("unable to convert object %T to *v1.Pod in %T", obj, c)) return false default: runtime.HandleError(fmt.Errorf("unable to handle object in %T: %T", c, obj)) return false } }, Handler: clientgocache.ResourceEventHandlerFuncs{ DeleteFunc: c.deletePodFunc, UpdateFunc: c.updatePodFunc, }, }) c.podLister = podInformer.Lister() c.podInformerSynced = podInformer.Informer().HasSynced // Start informer goroutines. go kubeInformerFactory.Start(stopCh) if ok := clientgocache.WaitForCacheSync(stopCh, c.podInformerSynced); !ok { return nil, fmt.Errorf("failed to wait for pod caches to sync") } klog.Infoln("init the pod cache successfully") return c, nil } // Run will set up the event handlers func (c *controller) Run(threadiness int, stopCh <-chan struct{}) error { defer runtime.HandleCrash() klog.Infoln("Starting Topology Controller.") klog.Infoln("Waiting for informer caches to sync") klog.Infof("Starting %v workers.", threadiness) klog.Infoln("Started workers") <-stopCh klog.Infoln("Shutting down workers") return nil } func (c *controller) deletePodFunc(obj interface{}) { var pod *v1.Pod switch t := obj.(type) { case *v1.Pod: pod = t case clientgocache.DeletedFinalStateUnknown: var ok bool pod, ok = t.Obj.(*v1.Pod) if !ok { klog.Warningf("cannot convert to *v1.Pod: %v", t.Obj) return } default: klog.Warningf("cannot convert to *v1.Pod: %v", t) return } delDevs := strings.Split(pod.Annotations[resourceName], ",") klog.V(2).Infof("delete pod %s in ns %s, deleted devs: %v", pod.Name, pod.Namespace, delDevs) if err := c.devicePlugin.UpdatePodDevice(nil, delDevs); err != nil { klog.Errorf("Failed to update PCI device: %v", err) } return } func (c *controller) updatePodFunc(o, obj interface{}) { var pod *v1.Pod switch t := obj.(type) { case *v1.Pod: pod = t default: klog.Warningf("cannot convert to *v1.Pod: %v", t) return } klog.V(2).Infof("add pod[%v]", pod.UID) var kubeletDeviceManagerCheckpoint = filepath.Join(pluginapi.DevicePluginPath, "kubelet_internal_checkpoint") registeredDevs := make(map[string][]string) devEntries := make([]checkpoint.PodDevicesEntry, 0) cp := checkpoint.New(devEntries, registeredDevs) blob, err := ioutil.ReadFile(kubeletDeviceManagerCheckpoint) if err != nil { klog.Errorf("Failed to read content from %s: %v", kubeletDeviceManagerCheckpoint, err) return } err = cp.UnmarshalCheckpoint(blob) if err != nil { klog.Errorf("Failed to unmarshal content: %v", err) return } var env = []string{} data, _ := cp.GetData() for _, pde := range data { if pde.PodUID != string(pod.UID) { continue } for _, devID := range pde.DeviceIDs { if val, ok := c.devicePlugin.shadowMap[devID]; ok && val != "" { env = append(env, val) delete(c.devicePlugin.shadowMap, devID) } } } klog.V(2).Infof("Pod[%v] want to be updated: %v", pod.UID, env) if len(env) == 0 { return } old := pod.DeepCopy() if pod.Annotations == nil { pod.Annotations = make(map[string]string, 0) } pod.Annotations[resourceName] = strings.Join(env, ",") // update pod annotation err = patchPodObject(c.clientset, old, pod) if err != nil { klog.Error(err) } } func 
patchPodObject(c kubernetes.Interface, cur, mod *v1.Pod) error { curJson, err := json.Marshal(cur) if err != nil { return err } modJson, err := json.Marshal(mod) if err != nil { return err } patch, err := strategicpatch.CreateTwoWayMergePatch(curJson, modJson, v1.Pod{}) if err != nil { return err } if len(patch) == 0 || string(patch) == "{}" { return nil } klog.V(3).Infof("Patching Pod %s/%s with %s", cur.Namespace, cur.Name, string(patch)) _, err = c.CoreV1().Pods(cur.Namespace).Patch(cur.Name, types.StrategicMergePatchType, patch) return err }
[ "\"KUBECONFIG\"" ]
[]
[ "KUBECONFIG" ]
[]
["KUBECONFIG"]
go
1
0
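kubeInit above prefers the file named by KUBECONFIG and falls back to in-cluster configuration when that file is missing. The same decision, reduced to a stdlib-only Python sketch; it mirrors the control flow only, not the client-go calls themselves:

import os

def pick_kube_config():
    # Prefer an existing file named by KUBECONFIG, as kubeInit does.
    path = os.environ.get("KUBECONFIG", "")
    if path and os.path.isfile(path):
        return "kubeconfig", path
    # Otherwise assume we are running inside the cluster.
    return "in-cluster", None

if __name__ == "__main__":
    print(pick_kube_config())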
pdb/reader_test.go
package pdb

import (
    "fmt"
    "os"
    "testing"
)

func TestReader(t *testing.T) {
    file, err := os.Open("5ujw.pdb")
    if err != nil {
        t.Error(err)
    }
    r := NewReader(file)
    models, err := r.ReadAll()
    if err != nil {
        t.Error(err)
    }
    for _, model := range models {
        fmt.Println(len(model.Atoms))
        fmt.Println(len(model.HetAtoms))
        fmt.Println(len(model.Connections))
        fmt.Println(len(model.Helixes))
        fmt.Println(len(model.Strands))
        fmt.Println(len(model.BioMatrixes))
        fmt.Println(len(model.SymMatrixes))
        fmt.Println(len(model.Residues))
        fmt.Println(len(model.Chains))
        fmt.Println()
    }
}
[]
[]
[]
[]
[]
go
null
null
null
pyghmi/redfish/command.py
# coding: utf8 # Copyright 2019 Lenovo # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The command module for redfish systems. Provides https-only support for redfish compliant endpoints """ import base64 from datetime import datetime from datetime import timedelta from fnmatch import fnmatch import json import os import re import socket import struct import sys import time from dateutil import tz import pyghmi.constants as const import pyghmi.exceptions as exc import pyghmi.redfish.oem.lookup as oem from pyghmi.util.parse import parse_time import pyghmi.util.webclient as webclient numregex = re.compile('([0-9]+)') powerstates = { 'on': 'On', 'off': 'ForceOff', 'softoff': 'GracefulShutdown', 'shutdown': 'GracefulShutdown', 'reset': 'ForceRestart', 'boot': None, } boot_devices_write = { 'net': 'Pxe', 'network': 'Pxe', 'pxe': 'Pxe', 'hd': 'Hdd', 'usb': 'Usb', 'cd': 'Cd', 'cdrom': 'Cd', 'optical': 'Cd', 'dvd': 'Cd', 'floppy': 'Floppy', 'default': 'None', 'setup': 'BiosSetup', 'bios': 'BiosSetup', 'f1': 'BiosSetup', } boot_devices_read = { 'BiosSetup': 'setup', 'Cd': 'optical', 'Floppy': 'floppy', 'Hdd': 'hd', 'None': 'default', 'Pxe': 'network', 'Usb': 'usb', 'SDCard': 'sdcard', } _healthmap = { 'Critical': const.Health.Critical, 'Unknown': const.Health.Warning, 'Warning': const.Health.Warning, 'OK': const.Health.Ok, } def _mask_to_cidr(mask): maskn = socket.inet_pton(socket.AF_INET, mask) maskn = struct.unpack('!I', maskn)[0] cidr = 32 while maskn & 0b1 == 0 and cidr > 0: cidr -= 1 maskn >>= 1 return cidr def _to_boolean(attrval): attrval = attrval.lower() if not attrval: return False if ('true'.startswith(attrval) or 'yes'.startswith(attrval) or 'enabled'.startswith(attrval) or attrval == '1'): return True if ('false'.startswith(attrval) or 'no'.startswith(attrval) or 'disabled'.startswith(attrval) or attrval == '0'): return False raise Exception( 'Unrecognized candidate for boolean: {0}'.format(attrval)) def _cidr_to_mask(cidr): return socket.inet_ntop( socket.AF_INET, struct.pack( '!I', (2**32 - 1) ^ (2**(32 - cidr) - 1))) def naturalize_string(key): """Analyzes string in a human way to enable natural sort :param nodename: The node name to analyze :returns: A structure that can be consumed by 'sorted' """ return [int(text) if text.isdigit() else text.lower() for text in re.split(numregex, key)] def natural_sort(iterable): """Return a sort using natural sort if possible :param iterable: :return: """ try: return sorted(iterable, key=naturalize_string) except TypeError: # The natural sort attempt failed, fallback to ascii sort return sorted(iterable) class SensorReading(object): def __init__(self, healthinfo, sensor=None, value=None, units=None, unavailable=False): if sensor: self.name = sensor['name'] else: self.name = healthinfo['Name'] self.health = _healthmap.get(healthinfo.get( 'Status', {}).get('Health', None), const.Health.Warning) self.states = [healthinfo.get('Status', {}).get('Health', 'Unknown')] self.health = _healthmap[healthinfo['Status']['Health']] self.states = 
[healthinfo['Status']['Health']] self.value = value self.state_ids = None self.imprecision = None self.units = units self.unavailable = unavailable class AttrDependencyHandler(object): def __init__(self, dependencies, currsettings, pendingsettings): self.dependencymap = {} for dep in dependencies.get('Dependencies', [{}]): if 'Dependency' not in dep: continue if dep['Type'] != 'Map': continue if dep['DependencyFor'] in self.dependencymap: self.dependencymap[ dep['DependencyFor']].append(dep['Dependency']) else: self.dependencymap[ dep['DependencyFor']] = [dep['Dependency']] self.curr = currsettings self.pend = pendingsettings self.reg = dependencies['Attributes'] def get_overrides(self, setting): overrides = {} blameattrs = [] if setting not in self.dependencymap: return {}, [] for depinfo in self.dependencymap[setting]: lastoper = None lastcond = None for mapfrom in depinfo.get('MapFrom', []): if lastcond is not None and not lastoper: break # MapTerm required to make sense of this, give up currattr = mapfrom['MapFromAttribute'] blameattrs.append(currattr) currprop = mapfrom['MapFromProperty'] if currprop == 'CurrentValue': if currattr in self.pend: currval = self.pend[currattr] else: currval = self.curr[currattr] else: currval = self.reg[currattr][currprop] lastcond = self.process(currval, mapfrom, lastcond, lastoper) lastoper = mapfrom.get('MapTerms', None) if lastcond: if setting not in overrides: overrides[setting] = {} if depinfo['MapToAttribute'] not in overrides[setting]: overrides[depinfo['MapToAttribute']] = {} overrides[depinfo['MapToAttribute']][ depinfo['MapToProperty']] = depinfo['MapToValue'] return overrides, blameattrs def process(self, currval, mapfrom, lastcond, lastoper): newcond = None mfc = mapfrom['MapFromCondition'] if mfc == 'EQU': newcond = currval == mapfrom['MapFromValue'] if mfc == 'NEQ': newcond = currval != mapfrom['MapFromValue'] if mfc == 'GEQ': newcond = float(currval) >= float(mapfrom['MapFromValue']) if mfc == 'GTR': newcond = float(currval) > float(mapfrom['MapFromValue']) if mfc == 'LEQ': newcond = float(currval) <= float(mapfrom['MapFromValue']) if mfc == 'LSS': newcond = float(currval) < float(mapfrom['MapFromValue']) if lastcond is not None: if lastoper == 'AND': return lastcond and newcond elif lastoper == 'OR': return lastcond or newcond return None return newcond class Command(object): def __init__(self, bmc, userid, password, verifycallback, sysurl=None, bmcurl=None, chassisurl=None, pool=None, port=443): self.wc = webclient.SecureHTTPConnection( bmc, port, verifycallback=verifycallback) self._hwnamemap = {} self._fwnamemap = {} self._urlcache = {} self._varbmcurl = bmcurl self._varbiosurl = None self._varbmcnicurl = None self._varsetbiosurl = None self._varchassisurl = chassisurl self._varresetbmcurl = None self._varupdateservice = None self._varfwinventory = None self._oem = None self._gpool = pool self.wc.set_header('Accept', 'application/json') self.wc.set_header('User-Agent', 'pyghmi') self.wc.set_header('Accept-Encoding', 'gzip') self.wc.set_header('OData-Version', '4.0') overview = self.wc.grab_json_response('/redfish/v1/') self.wc.set_basic_credentials(userid, password) self.username = userid self.password = password self.wc.set_header('Content-Type', 'application/json') if 'Systems' not in overview: raise exc.PyghmiException('Redfish not ready') systems = overview['Systems']['@odata.id'] res = self.wc.grab_json_response_with_status(systems) if res[1] == 401: raise exc.PyghmiException('Access Denied') elif res[1] < 200 or res[1] >= 300: 
raise exc.PyghmiException(repr(res[0])) members = res[0] self._varsensormap = {} systems = members['Members'] if sysurl: for system in systems: if system['@odata.id'] == sysurl: self.sysurl = sysurl break else: raise exc.PyghmiException( 'Specified sysurl not found: {0}'.format(sysurl)) else: if len(systems) != 1: raise exc.PyghmiException( 'Multi system manager, sysurl is required parameter') self.sysurl = systems[0]['@odata.id'] self.powerurl = self.sysinfo.get('Actions', {}).get( '#ComputerSystem.Reset', {}).get('target', None) @property def _accountserviceurl(self): sroot = self._do_web_request('/redfish/v1/') return sroot.get('AccountService', {}).get('@odata.id', None) @property def _validroles(self): okroles = set([]) roleurl = self._do_web_request(self._accountserviceurl).get( 'Roles', {}).get('@odata.id', None) if roleurl: roles = self._do_web_request(roleurl).get('Members', []) for role in roles: role = role.get('@odata.id', '') if not role: continue okroles.add(role.split('/')[-1]) if not okroles: okroles.add('Administrator') okroles.add('Operator') okroles.add('ReadOnly') return okroles def get_users(self): """get list of users and channel access information (helper) :param channel: number [1:7] :return: name: (str) uid: (int) channel: (int) access: callback (bool) link_auth (bool) ipmi_msg (bool) privilege_level: (str)[callback, user, operatorm administrator, proprietary, no_access] """ srvurl = self._accountserviceurl names = {} if srvurl: srvinfo = self._do_web_request(srvurl) srvurl = srvinfo.get('Accounts', {}).get('@odata.id', None) if srvurl: srvinfo = self._do_web_request(srvurl) accounts = srvinfo.get('Members', []) for account in accounts: accinfo = self._do_web_request(account['@odata.id']) currname = accinfo.get('UserName', '') currid = accinfo.get('Id', None) if currname: names[currid] = { 'name': currname, 'uid': currid, 'expiration': self.oem.get_user_expiration(currid), 'access': { 'privilege_level': accinfo.get('RoleId', 'Unknown') } } return names def _account_url_info_by_id(self, uid): srvurl = self._accountserviceurl if srvurl: srvinfo = self._do_web_request(srvurl) srvurl = srvinfo.get('Accounts', {}).get('@odata.id', None) if srvurl: srvinfo = self._do_web_request(srvurl) accounts = srvinfo.get('Members', []) for account in accounts: accinfo = self._do_web_request(account['@odata.id']) currid = accinfo.get('Id', None) if str(currid) == str(uid): accinfo['expiration'] = self.oem.get_user_expiration( uid) return account['@odata.id'], accinfo def get_user(self, uid): srvurl = self._accountserviceurl if srvurl: srvinfo = self._do_web_request(srvurl) srvurl = srvinfo.get('Accounts', {}).get('@odata.id', None) if srvurl: srvinfo = self._do_web_request(srvurl) accounts = srvinfo.get('Members', []) for account in accounts: accinfo = self._do_web_request(account['@odata.id']) currname = accinfo.get('UserName', '') currid = accinfo.get('Id', None) if str(currid) == str(uid): return {'name': currname, 'uid': uid, 'expiration': self.oem.get_user_expiration( uid), 'access': { 'privilege_level': accinfo.get( 'RoleId', 'Unknown')}} def set_user_password(self, uid, mode='set_password', password=None): """Set user password and (modes) :param uid: id number of user. 
see: get_names_uid()['name'] :param mode: disable = disable user connections enable = enable user connections set_password = set or ensure password :param password: Password (optional when mode is [disable or enable]) :return: True on success """ accinfo = self._account_url_info_by_id(uid) if not accinfo: raise Exception("No such account found") etag = accinfo[1].get('@odata.etag', None) if mode == 'set_password': self._do_web_request(accinfo[0], {'Password': password}, method='PATCH', etag=etag) elif mode == 'disable': self._do_web_request(accinfo[0], {'Enabled': False}, method='PATCH', etag=etag) elif mode == 'enable': self._do_web_request(accinfo[0], {'Enabled': True}, method='PATCH', etag=etag) return True def disable_user(self, uid, mode): """Disable User Just disable the User. This will not disable the password or revoke privileges. :param uid: user id :param mode: disable = disable user connections enable = enable user connections """ self.set_user_password(uid, mode) return True def set_user_access(self, uid, privilege_level='ReadOnly'): accinfo = self._account_url_info_by_id(uid) if not accinfo: raise Exception("Unable to find indicated uid") etag = accinfo[1].get('@odata.etag', None) for role in self._validroles: if role.lower() == privilege_level.lower(): privilege_level = role break self._do_web_request(accinfo[0], {'RoleId': privilege_level}, method='PATCH', etag=etag) def create_user(self, uid, name, password, privilege_level='ReadOnly'): """create/ensure a user is created with provided settings :param privilege_level: User Privilege level. Redfish role, commonly Administrator, Operator, and ReadOnly """ accinfo = self._account_url_info_by_id(uid) if not accinfo: raise Exception("Unable to find indicated uid") for role in self._validroles: if role.lower() == privilege_level.lower(): privilege_level = role break etag = accinfo[1].get('@odata.etag', None) userinfo = { "UserName": name, "Password": password, "RoleId": privilege_level, } self._do_web_request(accinfo[0], userinfo, method='PATCH', etag=etag) return True def user_delete(self, uid): # Redfish doesn't do so well with Deleting users either... 
# Blanking the username seems to be the convention # First, set a bogus password in case the implementation does honor # blank user, at least render such an account harmless self.set_user_password(uid, base64.b64encode(os.urandom(15))) self.set_user_name(uid, '') return True def set_user_name(self, uid, name): """Set user name :param uid: user id :param name: username """ accinfo = self._account_url_info_by_id(uid) if not accinfo: raise Exception("No such account found") etag = accinfo[1].get('@odata.etag', None) self._do_web_request(accinfo[0], {'UserName': name}, method='PATCH', etag=etag) return True @property def _updateservice(self): if not self._varupdateservice: overview = self._do_web_request('/redfish/v1/') us = overview.get('UpdateService', {}).get('@odata.id', None) if not us: raise exc.UnsupportedFunctionality( 'BMC does not implement extended firmware information') self._varupdateservice = us return self._varupdateservice @property def _fwinventory(self): if not self._varfwinventory: usi = self._do_web_request(self._updateservice) self._varfwinventory = usi.get('FirmwareInventory', {}).get( '@odata.id', None) if not self._varfwinventory: raise exc.UnsupportedFunctionality( 'BMC does not implement extended firmware information') return self._varfwinventory @property def sysinfo(self): return self._do_web_request(self.sysurl) @property def bmcinfo(self): return self._do_web_request(self._bmcurl) def get_power(self): currinfo = self._do_web_request(self.sysurl, cache=False) return {'powerstate': str(currinfo['PowerState'].lower())} def reseat_bay(self, bay): """Request the reseat of a bay Request the enclosure manager to reseat the system in a particular bay. :param bay: The bay identifier to reseat :return: """ self.oem.reseat_bay(bay) def set_power(self, powerstate, wait=False): if powerstate == 'boot': oldpowerstate = self.get_power()['powerstate'] powerstate = 'on' if oldpowerstate == 'off' else 'reset' elif powerstate in ('on', 'off'): oldpowerstate = self.get_power()['powerstate'] if oldpowerstate == powerstate: return {'powerstate': powerstate} reqpowerstate = powerstate if powerstate not in powerstates: raise exc.InvalidParameterValue( "Unknown power state %s requested" % powerstate) powerstate = powerstates[powerstate] result = self.wc.grab_json_response_with_status( self.powerurl, {'ResetType': powerstate}) if result[1] < 200 or result[1] >= 300: raise exc.PyghmiException(result[0]) if wait and reqpowerstate in ('on', 'off', 'softoff', 'shutdown'): if reqpowerstate in ('softoff', 'shutdown'): reqpowerstate = 'off' timeout = os.times()[4] + 300 while (self.get_power()['powerstate'] != reqpowerstate and os.times()[4] < timeout): time.sleep(1) if self.get_power()['powerstate'] != reqpowerstate: raise exc.PyghmiException( "System did not accomplish power state change") return {'powerstate': reqpowerstate} return {'pendingpowerstate': reqpowerstate} def _get_cache(self, url, cache=30): now = os.times()[4] cachent = self._urlcache.get(url, None) if cachent and cachent['vintage'] > now - cache: return cachent['contents'] return None def _do_bulk_requests(self, urls, cache=True): if self._gpool: urls = [(x, None, None, cache) for x in urls] for res in self._gpool.starmap(self._do_web_request_withurl, urls): yield res else: for url in urls: yield self._do_web_request_withurl(url, cache=cache) def _do_web_request_withurl(self, url, payload=None, method=None, cache=True): return self._do_web_request(url, payload, method, cache), url def _do_web_request(self, url, payload=None, 
method=None, cache=True, etag=None): res = None if cache and payload is None and method is None: res = self._get_cache(url, cache) if res: return res wc = self.wc.dupe() if etag: wc.stdheaders['If-Match'] = etag try: res = wc.grab_json_response_with_status(url, payload, method=method) finally: if 'If-Match' in wc.stdheaders: del wc.stdheaders['If-Match'] if res[1] < 200 or res[1] >= 300: try: info = json.loads(res[0]) errmsg = [ x.get('Message', x['MessageId']) for x in info.get( 'error', {}).get('@Message.ExtendedInfo', {})] msgid = [ x['MessageId'] for x in info.get( 'error', {}).get('@Message.ExtendedInfo', {})] errmsg = ','.join(errmsg) msgid = ','.join(msgid) raise exc.RedfishError(errmsg, msgid=msgid) except (ValueError, KeyError): raise exc.PyghmiException(str(url) + ":" + res[0]) if payload is None and method is None: self._urlcache[url] = {'contents': res[0], 'vintage': os.times()[4]} return res[0] def get_bootdev(self): """Get current boot device override information. :raises: PyghmiException on error :returns: dict """ result = self._do_web_request(self.sysurl) overridestate = result.get('Boot', {}).get( 'BootSourceOverrideEnabled', None) if overridestate == 'Disabled': return {'bootdev': 'default', 'persistent': True} persistent = None if overridestate == 'Once': persistent = False elif overridestate == 'Continuous': persistent = True else: raise exc.PyghmiException('Unrecognized Boot state: %s' % repr(overridestate)) uefimode = result.get('Boot', {}).get('BootSourceOverrideMode', None) if uefimode == 'UEFI': uefimode = True elif uefimode == 'Legacy': uefimode = False else: raise exc.PyghmiException('Unrecognized mode: %s' % uefimode) bootdev = result.get('Boot', {}).get('BootSourceOverrideTarget', None) if bootdev not in boot_devices_read: raise exc.PyghmiException('Unrecognized boot target: %s' % repr(bootdev)) bootdev = boot_devices_read[bootdev] return {'bootdev': bootdev, 'persistent': persistent, 'uefimode': uefimode} def set_bootdev(self, bootdev, persist=False, uefiboot=None): """Set boot device to use on next reboot :param bootdev: *network -- Request network boot *hd -- Boot from hard drive *safe -- Boot from hard drive, requesting 'safe mode' *optical -- boot from CD/DVD/BD drive *setup -- Boot into setup utility *default -- remove any directed boot device request :param persist: If true, ask that system firmware use this device beyond next boot. Be aware many systems do not honor this :param uefiboot: If true, request UEFI boot explicitly. If False, request BIOS style boot. None (default) does not modify the boot mode. :raises: PyghmiException on an error. 
:returns: dict or True -- If callback is not provided, the response """ reqbootdev = bootdev if (bootdev not in boot_devices_write and bootdev not in boot_devices_read): raise exc.InvalidParameterValue('Unsupported device %s' % repr(bootdev)) bootdev = boot_devices_write.get(bootdev, bootdev) if bootdev == 'None': payload = {'Boot': {'BootSourceOverrideEnabled': 'Disabled'}} else: payload = {'Boot': { 'BootSourceOverrideEnabled': 'Continuous' if persist else 'Once', 'BootSourceOverrideTarget': bootdev, }} if uefiboot is not None: uefiboot = 'UEFI' if uefiboot else 'Legacy' payload['BootSourceOverrideMode'] = uefiboot try: self._do_web_request(self.sysurl, payload, method='PATCH') return {'bootdev': reqbootdev} except Exception: del payload['BootSourceOverrideMode'] thetag = self.sysinfo.get('@odata.etag', None) self._do_web_request(self.sysurl, payload, method='PATCH', etag=thetag) return {'bootdev': reqbootdev} @property def _biosurl(self): if not self._varbiosurl: self._varbiosurl = self.sysinfo.get('Bios', {}).get('@odata.id', None) if self._varbiosurl is None: raise exc.UnsupportedFunctionality( 'Bios management not detected on this platform') return self._varbiosurl @property def _setbiosurl(self): if self._varsetbiosurl is None: biosinfo = self._do_web_request(self._biosurl) self._varsetbiosurl = biosinfo.get( '@Redfish.Settings', {}).get('SettingsObject', {}).get( '@odata.id', None) if self._varsetbiosurl is None: raise exc.UnsupportedFunctionality('Ability to set BIOS settings ' 'not detected on this platform') return self._varsetbiosurl @property def _sensormap(self): if not self._varsensormap: for chassis in self.sysinfo.get('Links', {}).get('Chassis', []): self._mapchassissensors(chassis) return self._varsensormap def _mapchassissensors(self, chassis): chassisurl = chassis['@odata.id'] chassisinfo = self._do_web_request(chassisurl) powurl = chassisinfo.get('Power', {}).get('@odata.id', '') if powurl: powinf = self._do_web_request(powurl) for voltage in powinf.get('Voltages', []): if 'Name' in voltage: self._varsensormap[voltage['Name']] = { 'name': voltage['Name'], 'url': powurl, 'type': 'Voltage'} thermurl = chassisinfo.get('Thermal', {}).get('@odata.id', '') if thermurl: therminf = self._do_web_request(thermurl) for fan in therminf.get('Fans', []): if 'Name' in fan: self._varsensormap[fan['Name']] = { 'name': fan['Name'], 'type': 'Fan', 'url': thermurl} for temp in therminf.get('Temperatures', []): if 'Name' in temp: self._varsensormap[temp['Name']] = { 'name': temp['Name'], 'type': 'Temperature', 'url': thermurl} for subchassis in chassisinfo.get('Links', {}).get('Contains', []): self._mapchassissensors(subchassis) @property def _bmcurl(self): if not self._varbmcurl: self._varbmcurl = self.sysinfo.get('Links', {}).get( 'ManagedBy', [{}])[0].get('@odata.id', None) return self._varbmcurl @property def _bmcnicurl(self): if not self._varbmcnicurl: self._varbmcnicurl = self._get_bmc_nic_url() return self._varbmcnicurl def list_network_interface_names(self): bmcinfo = self._do_web_request(self._bmcurl) nicurl = bmcinfo.get('EthernetInterfaces', {}).get('@odata.id', None) if not nicurl: return niclist = self._do_web_request(nicurl) for nic in niclist.get('Members', []): curl = nic.get('@odata.id', None) if not curl: continue yield curl.rsplit('/', 1)[1] def _get_bmc_nic_url(self, name=None): bmcinfo = self._do_web_request(self._bmcurl) nicurl = bmcinfo.get('EthernetInterfaces', {}).get('@odata.id', None) niclist = self._do_web_request(nicurl) foundnics = 0 lastnicurl = None for nic 
in niclist.get('Members', []): curl = nic.get('@odata.id', None) if not curl: continue if name is not None: if curl.endswith('/{0}'.format(name)): return curl continue if self.oem.hostnic and curl.endswith('/{0}'.format( self.oem.hostnic)): continue nicinfo = self._do_web_request(curl) if nicinfo.get('Links', {}).get('HostInterface', None): # skip host interface continue if not nicinfo.get('InterfaceEnabled', True): # skip disabled interfaces continue foundnics += 1 lastnicurl = curl if name is None and foundnics != 1: raise exc.PyghmiException( 'BMC does not have exactly one interface') if name is None: return lastnicurl @property def _bmcresetinfo(self): if not self._varresetbmcurl: bmcinfo = self._do_web_request(self._bmcurl) resetinf = bmcinfo.get('Actions', {}).get('#Manager.Reset', {}) url = resetinf.get('target', '') valid = resetinf.get('[email protected]', []) if not valid: tmpurl = resetinf.get('@Redfish.ActionInfo', None) if tmpurl: resetinf = self._do_web_request(tmpurl) valid = resetinf.get('Parameters', [{}])[0].get( 'AllowableValues') resettype = None if 'GracefulRestart' in valid: resettype = 'GracefulRestart' elif 'ForceRestart' in valid: resettype = 'ForceRestart' elif 'ColdReset' in valid: resettype = 'ColdReset' self._varresetbmcurl = url, resettype return self._varresetbmcurl def reset_bmc(self): url, action = self._bmcresetinfo if not url: raise Exception('BMC does not provide reset action') if not action: raise Exception('BMC does not accept a recognized reset type') self._do_web_request(url, {'ResetType': action}) def set_identify(self, on=True, blink=None): self._do_web_request( self.sysurl, {'IndicatorLED': 'Blinking' if blink else 'Lit' if on else 'Off'}, method='PATCH', etag='*') _idstatemap = { 'Blinking': 'blink', 'Lit': 'on', 'Off': 'off', } def get_identify(self): ledstate = self.sysinfo['IndicatorLED'] return {'identifystate': self._idstatemap[ledstate]} def get_health(self, verbose=True): health = self.sysinfo.get('Status', {}) health = health.get('HealthRollup', health.get('Health', 'Unknown')) warnunknown = health == 'Unknown' health = _healthmap[health] summary = {'badreadings': [], 'health': health} if health > 0 and verbose: # now have to manually peruse all psus, fans, processors, ram, # storage procsumstatus = self.sysinfo.get('ProcessorSummary', {}).get( 'Status', {}) procsumstatus = procsumstatus.get('HealthRollup', procsumstatus.get('Health', None)) if procsumstatus != 'OK': procfound = False procurl = self.sysinfo.get('Processors', {}).get('@odata.id', None) if procurl: for cpu in self._do_web_request(procurl).get( 'Members', []): cinfo = self._do_web_request(cpu['@odata.id']) if cinfo.get('Status', {}).get( 'State', None) == 'Absent': continue if cinfo.get('Status', {}).get( 'Health', None) not in ('OK', None): procfound = True summary['badreadings'].append(SensorReading(cinfo)) if not procfound: procinfo = self.sysinfo['ProcessorSummary'] procinfo['Name'] = 'Processors' summary['badreadings'].append(SensorReading(procinfo)) memsumstatus = self.sysinfo.get( 'MemorySummary', {}).get('Status', {}) memsumstatus = memsumstatus.get('HealthRollup', memsumstatus.get('Health', None)) if memsumstatus != 'OK': dimmfound = False for mem in self._do_web_request( self.sysinfo['Memory']['@odata.id'])['Members']: dimminfo = self._do_web_request(mem['@odata.id']) if dimminfo.get('Status', {}).get( 'State', None) == 'Absent': continue if dimminfo.get('Status', {}).get( 'Health', None) not in ('OK', None): summary['badreadings'].append(SensorReading(dimminfo)) 
dimmfound = True if not dimmfound: meminfo = self.sysinfo['MemorySummary'] meminfo['Name'] = 'Memory' summary['badreadings'].append(SensorReading(meminfo)) for adapter in self.sysinfo['PCIeDevices']: adpinfo = self._do_web_request(adapter['@odata.id']) if adpinfo['Status']['Health'] not in ('OK', None): summary['badreadings'].append(SensorReading(adpinfo)) for fun in self.sysinfo['PCIeFunctions']: funinfo = self._do_web_request(fun['@odata.id']) if funinfo['Status']['Health'] not in ('OK', None): summary['badreadings'].append(SensorReading(funinfo)) if warnunknown and not summary['badreadings']: unkinf = SensorReading({'Name': 'BMC', 'Status': {'Health': 'Unknown'}}) unkinf.states = ['System does not provide health information'] summary['badreadings'].append(unkinf) return summary def _get_biosreg(self, url): addon = {} valtodisplay = {} displaytoval = {} reg = self._do_web_request(url) reg = reg['RegistryEntries'] for attr in reg['Attributes']: vals = attr.get('Value', []) if vals: valtodisplay[attr['AttributeName']] = {} displaytoval[attr['AttributeName']] = {} for val in vals: valtodisplay[ attr['AttributeName']][val['ValueName']] = val[ 'ValueDisplayName'] displaytoval[ attr['AttributeName']][val['ValueDisplayName']] = val[ 'ValueName'] defaultval = attr.get('DefaultValue', None) defaultval = valtodisplay.get(attr['AttributeName'], {}).get( defaultval, defaultval) if attr['Type'] == 'Integer' and defaultval: defaultval = int(defaultval) if attr['Type'] == 'Boolean': vals = [{'ValueDisplayName': 'True'}, {'ValueDisplayName': 'False'}] addon[attr['AttributeName']] = { 'default': defaultval, 'help': attr.get('HelpText', None), 'sortid': attr.get('DisplayOrder', None), 'possible': [x['ValueDisplayName'] for x in vals], } return addon, valtodisplay, displaytoval, reg def get_bmc_configuration(self): """Get miscellaneous BMC configuration In much the same way a bmc can present arbitrary key-value structure for BIOS/UEFI configuration, provide a mechanism for a BMC to provide arbitrary key-value for BMC specific settings. """ # For now, this is a stub, no implementation for redfish currently return self.oem.get_bmc_configuration() def set_bmc_configuration(self, changeset): """Get miscellaneous BMC configuration In much the same way a bmc can present arbitrary key-value structure for BIOS/UEFI configuration, provide a mechanism for a BMC to provide arbitrary key-value for BMC specific settings. """ # For now, this is a stub, no implementation for redfish currently return self.oem.set_bmc_configuration(changeset) def clear_bmc_configuration(self): """Reset BMC to factory default Call appropriate function to clear BMC to factory default settings. In many cases, this may render remote network access impracticle or impossible." 
""" raise exc.UnsupportedFunctionality( 'Clear BMC configuration not supported in redfish yet') def get_system_configuration(self, hideadvanced=True): return self._getsyscfg()[0] def _getsyscfg(self): biosinfo = self._do_web_request(self._biosurl, cache=False) reginfo = ({}, {}, {}, {}) extrainfo = {} valtodisplay = {} self.attrdeps = {'Dependencies': [], 'Attributes': []} if 'AttributeRegistry' in biosinfo: overview = self._do_web_request('/redfish/v1/') reglist = overview['Registries']['@odata.id'] reglist = self._do_web_request(reglist) regurl = None for cand in reglist.get('Members', []): cand = cand.get('@odata.id', '') candname = cand.split('/')[-1] if candname == '': # implementation uses trailing slash candname = cand.split('/')[-2] if candname == biosinfo['AttributeRegistry']: regurl = cand break if not regurl: # Workaround a vendor bug where they link to a # non-existant name for cand in reglist.get('Members', []): cand = cand.get('@odata.id', '') candname = cand.split('/')[-1] candname = candname.split('.')[0] if candname == biosinfo[ 'AttributeRegistry'].split('.')[0]: regurl = cand break if regurl: reginfo = self._do_web_request(regurl) for reg in reginfo.get('Location', []): if reg.get('Language', 'en').startswith('en'): reguri = reg['Uri'] reginfo = self._get_biosreg(reguri) extrainfo, valtodisplay, _, self.attrdeps = reginfo currsettings = {} try: pendingsettings = self._do_web_request(self._setbiosurl) except exc.UnsupportedFunctionality: pendingsettings = {} pendingsettings = pendingsettings.get('Attributes', {}) for setting in biosinfo.get('Attributes', {}): val = biosinfo['Attributes'][setting] currval = val if setting in pendingsettings: val = pendingsettings[setting] val = valtodisplay.get(setting, {}).get(val, val) currval = valtodisplay.get(setting, {}).get(currval, currval) val = {'value': val} if currval != val['value']: val['active'] = currval val.update(**extrainfo.get(setting, {})) currsettings[setting] = val return currsettings, reginfo def clear_system_configuration(self): """Clear the BIOS/UEFI configuration """ biosinfo = self._do_web_request(self._biosurl) rb = biosinfo.get('Actions', {}).get('#Bios.ResetBios', {}) actinf = rb.get('@Redfish.ActionInfo', None) rb = rb.get('target', '') parms = {} if actinf: actinf = self._do_web_request( '/redfish/v1/Systems/Self/Bios/ResetBiosActionInfo') for parm in actinf.get('Parameters', ()): if parm.get('Required', False): if parm.get('Name', None) == 'ResetType' and parm.get( 'AllowableValues', [None])[0] == 'Reset': parms['ResetType'] = 'Reset' else: raise Exception( 'Unrecognized required parameter {0}'.format( parm.get('Name', 'Unknown'))) if not rb: raise Exception('BIOS reset not detected on this system') if not parms: parms = {'Action': 'Bios.ResetBios'} self._do_web_request(rb, parms) def set_system_configuration(self, changeset): while True: try: self._set_system_configuration(changeset) return except exc.RedfishError as re: if 'etag' not in re.msgid.lower(): raise def _set_system_configuration(self, changeset): currsettings, reginfo = self._getsyscfg() rawsettings = self._do_web_request(self._biosurl, cache=False) rawsettings = rawsettings.get('Attributes', {}) pendingsettings = self._do_web_request(self._setbiosurl) etag = pendingsettings.get('@odata.etag', None) pendingsettings = pendingsettings.get('Attributes', {}) dephandler = AttrDependencyHandler(self.attrdeps, rawsettings, pendingsettings) for change in list(changeset): if change not in currsettings: found = False for attr in currsettings: if 
fnmatch(attr.lower(), change.lower()): found = True changeset[attr] = changeset[change] if fnmatch(attr.lower(), change.replace('.', '_').lower()): found = True changeset[attr] = changeset[change] if found: del changeset[change] for change in changeset: changeval = changeset[change] overrides, blameattrs = dephandler.get_overrides(change) meta = {} for attr in self.attrdeps['Attributes']: if attr['AttributeName'] == change: meta = dict(attr) break meta.update(**overrides.get(change, {})) if meta.get('ReadOnly', False) or meta.get('GrayOut', False): errstr = '{0} is read only'.format(change) if blameattrs: errstr += (' due to one of the following settings: ' '{0}'.format(','.join(sorted(blameattrs))) ) raise exc.InvalidParameterValue(errstr) if (currsettings.get(change, {}).get('possible', []) and changeval not in currsettings[change]['possible']): normval = changeval.lower() normval = re.sub(r'\s+', ' ', normval) if not normval.endswith('*'): normval += '*' for cand in currsettings[change]['possible']: if fnmatch(cand.lower(), normval): changeset[change] = cand break else: raise exc.InvalidParameterValue( '{0} is not a valid value for {1} ({2})'.format( changeval, change, ','.join( currsettings[change]['possible']))) if changeset[change] in reginfo[2].get(change, {}): changeset[change] = reginfo[2][change][changeset[change]] for regentry in reginfo[3].get('Attributes', []): if change in (regentry.get('AttributeName', ''), regentry.get('DisplayName', '')): if regentry.get('Type', None) == 'Integer': changeset[change] = int(changeset[change]) if regentry.get('Type', None) == 'Boolean': changeset[change] = _to_boolean(changeset[change]) redfishsettings = {'Attributes': changeset} self._do_web_request(self._setbiosurl, redfishsettings, 'PATCH', etag=etag) def set_net_configuration(self, ipv4_address=None, ipv4_configuration=None, ipv4_gateway=None, name=None): patch = {} ipinfo = {} dodhcp = None netmask = None if ipv4_address: if '/' in ipv4_address: ipv4_address, cidr = ipv4_address.split('/') netmask = _cidr_to_mask(int(cidr)) patch['IPv4StaticAddresses'] = [ipinfo] ipinfo['Address'] = ipv4_address ipv4_configuration = 'static' if netmask: ipinfo['SubnetMask'] = netmask if ipv4_gateway: patch['IPv4StaticAddresses'] = [ipinfo] ipinfo['Gateway'] = ipv4_gateway ipv4_configuration = 'static' if ipv4_configuration.lower() == 'dhcp': dodhcp = True patch['DHCPv4'] = {'DHCPEnabled': True} elif (ipv4_configuration == 'static' or 'IPv4StaticAddresses' in patch): dodhcp = False patch['DHCPv4'] = {'DHCPEnabled': False} if patch: nicurl = self._get_bmc_nic_url(name) try: self._do_web_request(nicurl, patch, 'PATCH') except exc.RedfishError: patch = {'IPv4Addresses': [ipinfo]} if dodhcp: ipinfo['AddressOrigin'] = 'DHCP' elif dodhcp is not None: ipinfo['AddressOrigin'] = 'Static' self._do_web_request(nicurl, patch, 'PATCH') def get_net_configuration(self, name=None): nicurl = self._get_bmc_nic_url(name) netcfg = self._do_web_request(nicurl, cache=False) ipv4 = netcfg.get('IPv4Addresses', {}) if not ipv4: raise exc.PyghmiException('Unable to locate network information') retval = {} if len(netcfg['IPv4Addresses']) != 1: netcfg['IPv4Addresses'] = [ x for x in netcfg['IPv4Addresses'] if x['Address'] != '0.0.0.0'] if len(netcfg['IPv4Addresses']) != 1: raise exc.PyghmiException('Multiple IP addresses not supported') currip = netcfg['IPv4Addresses'][0] cidr = _mask_to_cidr(currip['SubnetMask']) retval['ipv4_address'] = '{0}/{1}'.format(currip['Address'], cidr) retval['mac_address'] = netcfg['MACAddress'] 
hasgateway = _mask_to_cidr(currip['Gateway']) retval['ipv4_gateway'] = currip['Gateway'] if hasgateway else None retval['ipv4_configuration'] = currip['AddressOrigin'] return retval def get_hostname(self): netcfg = self._do_web_request(self._bmcnicurl) return netcfg['HostName'] def set_hostname(self, hostname): self._do_web_request(self._bmcnicurl, {'HostName': hostname}, 'PATCH') def get_firmware(self, components=()): try: for firminfo in self.oem.get_firmware_inventory(components): yield firminfo except exc.BypassGenericBehavior: return fwlist = self._do_web_request(self._fwinventory) fwurls = [x['@odata.id'] for x in fwlist.get('Members', [])] self._fwnamemap = {} for res in self._do_bulk_requests(fwurls): res = self._extract_fwinfo(res) if res[0] is None: continue yield res def _extract_fwinfo(self, inf): currinf = {} fwi, url = inf fwname = fwi.get('Name', 'Unknown') if fwname in self._fwnamemap: fwname = fwi.get('Id', fwname) if fwname in self._fwnamemap: # Block duplicates for by name retrieval self._fwnamemap[fwname] = None else: self._fwnamemap[fwname] = url currinf['name'] = fwname currinf['id'] = fwi.get('Id', None) currinf['version'] = fwi.get('Version', 'Unknown') currinf['date'] = parse_time(fwi.get('ReleaseDate', '')) if not (currinf['version'] or currinf['date']): return None, None # TODO(Jarrod Johnson): OEM extended data with buildid currstate = fwi.get('Status', {}).get('State', 'Unknown') if currstate == 'StandbyOffline': currinf['state'] = 'pending' elif currstate == 'Enabled': currinf['state'] = 'active' elif currstate == 'StandbySpare': currinf['state'] = 'backup' return fwname, currinf def get_inventory_descriptions(self, withids=False): return self.oem.get_inventory_descriptions(withids) def get_inventory_of_component(self, component): return self.oem.get_inventory_of_component(component) def get_inventory(self, withids=False): return self.oem.get_inventory(withids) @property def oem(self): if not self._oem: self._oem = oem.get_oem_handler( self.sysinfo, self.sysurl, self.wc, self._urlcache, self) self._oem.set_credentials(self.username, self.password) return self._oem def get_description(self): return self.oem.get_description() def get_event_log(self, clear=False): bmcinfo = self._do_web_request(self._bmcurl) lsurl = bmcinfo.get('LogServices', {}).get('@odata.id', None) if not lsurl: return currtime = bmcinfo.get('DateTime', None) correction = timedelta(0) utz = tz.tzoffset('', 0) ltz = tz.gettz() if currtime: currtime = parse_time(currtime) if currtime: now = datetime.now(utz) try: correction = now - currtime except TypeError: correction = now - currtime.replace(tzinfo=utz) lurls = self._do_web_request(lsurl).get('Members', []) for lurl in lurls: lurl = lurl['@odata.id'] loginfo = self._do_web_request(lurl, cache=(not clear)) entriesurl = loginfo.get('Entries', {}).get('@odata.id', None) if not entriesurl: continue logid = loginfo.get('Id', '') entries = self._do_web_request(entriesurl, cache=False) if clear: # The clear is against the log service etag, not entries # so we have to fetch service etag after we fetch entries # until we can verify that the etag is consistent to prove # that the clear is atomic newloginfo = self._do_web_request(lurl, cache=False) clearurl = newloginfo.get('Actions', {}).get( '#LogService.ClearLog', {}).get('target', '') while clearurl: try: self._do_web_request(clearurl, method='POST', payload={}) clearurl = False except exc.PyghmiException as e: if 'EtagPreconditionalFailed' not in str(e): raise # This doesn't guarantee atomicity, 
but it mitigates # greatly. Unfortunately some implementations # mutate the tag endlessly and we have no hope entries = self._do_web_request(entriesurl, cache=False) newloginfo = self._do_web_request(lurl, cache=False) for log in entries.get('Members', []): record = {} record['log_id'] = logid parsedtime = parse_time(log.get('Created', '')) if parsedtime: entime = parsedtime + correction entime = entime.astimezone(ltz) record['timestamp'] = entime.strftime('%Y-%m-%dT%H:%M:%S') else: record['timestamp'] = log.get('Created', '') record['message'] = log.get('Message', None) record['severity'] = _healthmap.get( entries.get('Severity', 'Warning'), const.Health.Critical) yield record def get_sensor_descriptions(self): for sensor in natural_sort(self._sensormap): yield self._sensormap[sensor] def get_sensor_reading(self, sensorname): if sensorname not in self._sensormap: raise Exception('Sensor not found') sensor = self._sensormap[sensorname] reading = self._do_web_request(sensor['url'], cache=1) return self._extract_reading(sensor, reading) def get_sensor_data(self): for sensor in natural_sort(self._sensormap): yield self.get_sensor_reading(sensor) def _extract_reading(self, sensor, reading): if sensor['type'] == 'Fan': for fan in reading['Fans']: if fan['Name'] == sensor['name']: val = fan.get('Reading', None) unavail = val is None units = fan.get('ReadingUnits', None) return SensorReading( None, sensor, value=val, units=units, unavailable=unavail) elif sensor['type'] == 'Temperature': for temp in reading['Temperatures']: if temp['Name'] == sensor['name']: val = temp.get('ReadingCelsius', None) unavail = val is None return SensorReading( None, sensor, value=val, units='°C', unavailable=unavail) elif sensor['type'] == 'Voltage': for volt in reading['Voltages']: if volt['Name'] == sensor['name']: val = volt.get('ReadingVolts', None) unavail = val is None return SensorReading( None, sensor, value=val, units='V', unavailable=unavail) def list_media(self): return self.oem.list_media(self) def get_storage_configuration(self): """"Get storage configuration data Retrieves the storage configuration from the target. Data is given about disks, pools, and volumes. When referencing something, use the relevant 'cfgpath' attribute to describe it. It is not guaranteed that cfgpath will be consistent version to version, so a lookup is suggested in end user applications. :return: A pyghmi.storage.ConfigSpec object describing current config """ return self.oem.get_storage_configuration() def remove_storage_configuration(self, cfgspec): """Remove specified storage configuration from controller. :param cfgspec: A pyghmi.storage.ConfigSpec describing what to remove :return: """ return self.oem.remove_storage_configuration(cfgspec) def apply_storage_configuration(self, cfgspec=None): """Evaluate a configuration for validity This will check if configuration is currently available and, if given, whether the specified cfgspec can be applied. :param cfgspec: A pyghmi.storage.ConfigSpec describing desired oonfig :return: """ return self.oem.apply_storage_configuration(cfgspec) def attach_remote_media(self, url, username=None, password=None): """Attach remote media by url Given a url, attach remote media (cd/usb image) to the target system. :param url: URL to indicate where to find image (protocol support varies by BMC) :param username: Username for endpoint to use when accessing the URL. If applicable, 'domain' would be indicated by '@' or '\' syntax. :param password: Password for endpoint to use when accessing the URL. 
""" # At the moment, there isn't a viable way to # identify the correct resource ahead of time. # As such it's OEM specific until the standard # provides a better way. bmcinfo = self._do_web_request(self._bmcurl) vmcoll = bmcinfo.get('VirtualMedia', {}).get('@odata.id', None) vmurls = None if vmcoll: vmlist = self._do_web_request(vmcoll) vmurls = [x['@odata.id'] for x in vmlist.get('Members', [])] try: self.oem.attach_remote_media(url, username, password, vmurls) except exc.BypassGenericBehavior: return for vmurl in vmurls: vminfo = self._do_web_request(vmurl, cache=False) if vminfo.get('ConnectedVia', None) != 'NotConnected': continue self._do_web_request(vmurl, {'Image': url, 'Inserted': True}, 'PATCH') break def detach_remote_media(self): bmcinfo = self._do_web_request(self._bmcurl) vmcoll = bmcinfo.get('VirtualMedia', {}).get('@odata.id', None) try: self.oem.detach_remote_media() except exc.BypassGenericBehavior: return if vmcoll: vmlist = self._do_web_request(vmcoll) vmurls = [x['@odata.id'] for x in vmlist.get('Members', [])] for vminfo in self._do_bulk_requests(vmurls): vminfo, currl = vminfo if vminfo['Image']: self._do_web_request(currl, {'Image': None, 'Inserted': False}, method='PATCH') def upload_media(self, filename, progress=None, data=None): """Upload a file to be hosted on the target BMC This will upload the specified data to the BMC so that it will make it available to the system as an emulated USB device. :param filename: The filename to use, the basename of the parameter will be given to the bmc. :param progress: Optional callback for progress updates """ return self.oem.upload_media(filename, progress, data) def update_firmware(self, file, data=None, progress=None, bank=None): """Send file to BMC to perform firmware update :param filename: The filename to upload to the target BMC :param data: The payload of the firmware. Default is to read from specified filename. :param progress: A callback that will be given a dict describing update process. Provide if :param bank: Indicate a target 'bank' of firmware if supported """ return self.oem.update_firmware(file, data, progress, bank) def get_diagnostic_data(self, savefile, progress=None, autosuffix=False): if os.path.exists(savefile) and not os.path.isdir(savefile): raise exc.InvalidParameterValue( 'Not allowed to overwrite existing file: {0}'.format( savefile)) return self.oem.get_diagnostic_data(savefile, progress, autosuffix) def get_licenses(self): return self.oem.get_licenses() def delete_license(self, name): return self.oem.delete_license(name) def save_licenses(self, directory): if os.path.exists(directory) and not os.path.isdir(directory): raise exc.InvalidParameterValue( 'Not allowed to overwrite existing file: {0}'.format( directory)) return self.oem.save_licenses(directory) def apply_license(self, filename, progress=None, data=None): return self.oem.apply_license(filename, progress, data) if __name__ == '__main__': print(repr( Command(sys.argv[1], os.environ['BMCUSER'], os.environ['BMCPASS'], verifycallback=lambda x: True).get_power()))
[]
[]
[ "BMCUSER", "BMCPASS" ]
[]
["BMCUSER", "BMCPASS"]
python
2
0
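The __main__ block in the record above pulls its BMC credentials from the environment. The following is a minimal Python sketch of that pattern, assuming only that BMCUSER and BMCPASS are exported; the host argument comes from the command line as in the record, and everything else here is illustrative rather than part of the library.

import os
import sys

def read_bmc_credentials():
    # Mirrors the __main__ block above: BMCUSER and BMCPASS must be exported,
    # otherwise a KeyError is raised before any connection is attempted.
    return os.environ['BMCUSER'], os.environ['BMCPASS']

if __name__ == '__main__':
    user, password = read_bmc_credentials()
    print('connecting to', sys.argv[1], 'as', user)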
integration-cli/requirements_test.go
package main import ( "context" "fmt" "io/ioutil" "net/http" "os" "os/exec" "strconv" "strings" "testing" "time" "github.com/demonoid81/moby/api/types" "github.com/demonoid81/moby/api/types/swarm" "github.com/demonoid81/moby/api/types/versions" "github.com/demonoid81/moby/client" "github.com/demonoid81/moby/integration-cli/requirement" "github.com/demonoid81/moby/testutil/registry" ) func ArchitectureIsNot(arch string) bool { return os.Getenv("DOCKER_ENGINE_GOARCH") != arch } func DaemonIsWindows() bool { return testEnv.OSType == "windows" } func DaemonIsWindowsAtLeastBuild(buildNumber int) func() bool { return func() bool { if testEnv.OSType != "windows" { return false } version := testEnv.DaemonInfo.KernelVersion numVersion, _ := strconv.Atoi(strings.Split(version, " ")[1]) return numVersion >= buildNumber } } func DaemonIsLinux() bool { return testEnv.OSType == "linux" } func MinimumAPIVersion(version string) func() bool { return func() bool { return versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), version) } } func OnlyDefaultNetworks() bool { cli, err := client.NewClientWithOpts(client.FromEnv) if err != nil { return false } networks, err := cli.NetworkList(context.TODO(), types.NetworkListOptions{}) if err != nil || len(networks) > 0 { return false } return true } func IsAmd64() bool { return os.Getenv("DOCKER_ENGINE_GOARCH") == "amd64" } func NotArm() bool { return ArchitectureIsNot("arm") } func NotArm64() bool { return ArchitectureIsNot("arm64") } func NotPpc64le() bool { return ArchitectureIsNot("ppc64le") } func UnixCli() bool { return isUnixCli } func Network() bool { // Set a timeout on the GET at 15s const timeout = 15 * time.Second const url = "https://hub.docker.com" client := http.Client{ Timeout: timeout, } resp, err := client.Get(url) if err != nil && strings.Contains(err.Error(), "use of closed network connection") { panic(fmt.Sprintf("Timeout for GET request on %s", url)) } if resp != nil { resp.Body.Close() } return err == nil } func Apparmor() bool { if strings.HasPrefix(testEnv.DaemonInfo.OperatingSystem, "SUSE Linux Enterprise Server ") { return false } buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled") return err == nil && len(buf) > 1 && buf[0] == 'Y' } func Devicemapper() bool { return strings.HasPrefix(testEnv.DaemonInfo.Driver, "devicemapper") } func IPv6() bool { cmd := exec.Command("test", "-f", "/proc/net/if_inet6") return cmd.Run() != nil } func UserNamespaceROMount() bool { // quick case--userns not enabled in this test run if os.Getenv("DOCKER_REMAP_ROOT") == "" { return true } if _, _, err := dockerCmdWithError("run", "--rm", "--read-only", "busybox", "date"); err != nil { return false } return true } func NotUserNamespace() bool { root := os.Getenv("DOCKER_REMAP_ROOT") return root == "" } func UserNamespaceInKernel() bool { if _, err := os.Stat("/proc/self/uid_map"); os.IsNotExist(err) { /* * This kernel-provided file only exists if user namespaces are * supported */ return false } // We need extra check on redhat based distributions if f, err := os.Open("/sys/module/user_namespace/parameters/enable"); err == nil { defer f.Close() b := make([]byte, 1) _, _ = f.Read(b) return string(b) != "N" } return true } func IsPausable() bool { if testEnv.OSType == "windows" { return testEnv.DaemonInfo.Isolation == "hyperv" } return true } func IsolationIs(expectedIsolation string) bool { return testEnv.OSType == "windows" && string(testEnv.DaemonInfo.Isolation) == expectedIsolation } func IsolationIsHyperv() bool { return 
IsolationIs("hyperv") } func IsolationIsProcess() bool { return IsolationIs("process") } // RegistryHosting returns whether the host can host a registry (v2) or not func RegistryHosting() bool { // for now registry binary is built only if we're running inside // container through `make test`. Figure that out by testing if // registry binary is in PATH. _, err := exec.LookPath(registry.V2binary) return err == nil } func SwarmInactive() bool { return testEnv.DaemonInfo.Swarm.LocalNodeState == swarm.LocalNodeStateInactive } func TODOBuildkit() bool { return os.Getenv("DOCKER_BUILDKIT") == "" } // testRequires checks if the environment satisfies the requirements // for the test to run or skips the tests. func testRequires(t *testing.T, requirements ...requirement.Test) { t.Helper() requirement.Is(t, requirements...) }
[ "\"DOCKER_ENGINE_GOARCH\"", "\"DOCKER_ENGINE_GOARCH\"", "\"DOCKER_REMAP_ROOT\"", "\"DOCKER_REMAP_ROOT\"", "\"DOCKER_BUILDKIT\"" ]
[]
[ "DOCKER_ENGINE_GOARCH", "DOCKER_REMAP_ROOT", "DOCKER_BUILDKIT" ]
[]
["DOCKER_ENGINE_GOARCH", "DOCKER_REMAP_ROOT", "DOCKER_BUILDKIT"]
go
3
0
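The Go helpers above gate tests on environment variables such as DOCKER_ENGINE_GOARCH, DOCKER_REMAP_ROOT and DOCKER_BUILDKIT. Below is a rough Python sketch of the same requirement-predicate pattern; the variable names come from the record, while the unittest.SkipTest mechanism is an assumption added for illustration.

import os
import unittest

def is_amd64():
    # Same check as IsAmd64 above.
    return os.getenv('DOCKER_ENGINE_GOARCH') == 'amd64'

def not_user_namespace():
    # Same check as NotUserNamespace above: no remapped root configured.
    return os.getenv('DOCKER_REMAP_ROOT', '') == ''

def todo_buildkit():
    # Same check as TODOBuildkit above.
    return os.getenv('DOCKER_BUILDKIT', '') == ''

def test_requires(*requirements):
    # Skip the calling test unless every requirement predicate holds,
    # analogous to testRequires in the record.
    for req in requirements:
        if not req():
            raise unittest.SkipTest('requirement not met: %s' % req.__name__)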
src/main/java/weka/core/Environment.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Environment.java * Copyright (C) 2008-2012 University of Waikato, Hamilton, New Zealand * */ package weka.core; import java.util.Enumeration; import java.util.Iterator; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.TreeMap; /** * This class encapsulates a map of all environment and java system properties. * There are methods for adding and removing variables to this Environment * object as well as to the system wide global environment. There is also a * method for replacing key names (enclosed by ${}) with their associated value * in Strings. * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision: 13893 $ */ public class Environment implements RevisionHandler { private static Environment m_systemWide = new Environment(); // Map to hold all the system environment variables + java properties private final Map<String, String> m_envVars = new TreeMap<String, String>(); /** * Constructs a new Environment object with all environment variables * and java properties set. */ public Environment() { // get the env variables first Map<String, String> env = System.getenv(); Set<String> keys = env.keySet(); Iterator<String> i = keys.iterator(); while (i.hasNext()) { String kv = i.next(); String value = env.get(kv); m_envVars.put(kv, value); } // get the java properties Properties jvmProps = System.getProperties(); Enumeration<?> pKeys = jvmProps.propertyNames(); while (pKeys.hasMoreElements()) { String kv = (String) pKeys.nextElement(); String value = jvmProps.getProperty(kv); m_envVars.put(kv, value); } m_envVars.put("weka.version", Version.VERSION); } /** * Constructor that makes a new Environment object containing all * the entries in the supplied one * * @param other the Environment object to copy to this one */ public Environment(Environment other) { m_envVars.putAll(other.m_envVars); } /** * Get the singleton system-wide (visible to every class in the running VM) * set of environment variables. * * @return the system-wide set of environment variables. */ public static Environment getSystemWide() { return m_systemWide; } /** * Tests for the presence of environment variables. * * @param source the string to test * @return true if the argument contains one or more environment variables */ public static boolean containsEnvVariables(String source) { return (source.indexOf("${") >= 0); } /** * Substitute a variable names for their values in the given string. 
* * @param source the source string to replace variables in * @return a String with all variable names replaced with their values * @throws Exception if an unknown variable name is encountered */ public String substitute(String source) throws Exception { // Grab each variable out of the string int index = source.indexOf("${"); while (index >= 0) { index += 2; int endIndex = source.indexOf('}'); if (endIndex >= 0 && endIndex > index + 1) { String key = source.substring(index, endIndex); // look this sucker up String replace = m_envVars.get(key); if (replace != null) { String toReplace = "${" + key + "}"; source = source.replace(toReplace, replace); } else { throw new Exception("[Environment] Variable " + key + " doesn't seem to be set."); } } else { break; } index = source.indexOf("${"); } return source; } /** * Add a variable to the internal map of this properties object. * * @param key the name of the variable * @param value its value */ public void addVariable(String key, String value) { m_envVars.put(key, value); } /** * Add a a variable to the internal map of this properties object and to the * global system-wide environment; * * @param key the name of the variable * @param value its value */ public void addVariableSystemWide(String key, String value) { addVariable(key, value); // local // system wide if (this != getSystemWide()) { getSystemWide().addVariableSystemWide(key, value); } System.setProperty(key, value); } /** * Remove a named variable from the map. * * @param key the name of the varaible to remove. */ public void removeVariable(String key) { m_envVars.remove(key); } /** * Get the names of the variables (keys) stored in the internal map. * * @return a Set of variable names (keys) */ public Set<String> getVariableNames() { return m_envVars.keySet(); } /** * Get the value for a particular variable. * * @param key the name of the variable to get * @return the associated value or null if this variable is not in the * internal map */ public String getVariableValue(String key) { return m_envVars.get(key); } /** * Main method for testing this class. * * @param args a list of strings to replace variables in (e.g. "\${os.name} * "\${java.version}") */ public static void main(String[] args) { Environment t = new Environment(); // String test = // "Here is a string with the variable ${java.version} and ${os.name} in it"; if (args.length == 0) { System.err .println("Usage: java weka.core.Environment <string> <string> ..."); } else { try { for (String arg : args) { String newS = t.substitute(arg); System.out.println("Original string:\n" + arg + "\n\nNew string:\n" + newS); } } catch (Exception ex) { ex.printStackTrace(); } } } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision: 13893 $"); } }
[]
[]
[]
[]
[]
java
0
0
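Environment.substitute above expands ${name} references against the merged map of environment variables and JVM properties. A minimal Python sketch of the same expansion, assuming a plain dict (defaulting to os.environ) in place of the Java map:

import os

def substitute(source, env=None):
    # Expand ${name} occurrences using the supplied mapping; raise if a
    # referenced variable is not set, like the Java version does.
    env = dict(os.environ) if env is None else env
    index = source.find('${')
    while index >= 0:
        end = source.find('}', index + 2)
        if end < 0:
            break
        key = source[index + 2:end]
        if key not in env:
            raise KeyError('Variable %s does not seem to be set.' % key)
        source = source.replace('${' + key + '}', env[key])
        index = source.find('${')
    return source

# Example: substitute('${HOME}/data') expands HOME much as the Java class
# expands environment variables and java properties.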
fixture/mail.py
import poplib import email import time class MailHelper: def __init__(self, app): self.app = app def get_mail(self, username, password, subject): for i in range(5): pop = poplib.POP3(self.app.config['james']['host']) pop.user(username) pop.pass_(password) num = pop.stat()[0] if num > 0: for n in range(num): msglines = pop.retr(n+1)[1] msgtext = "\n".join(map(lambda x: x.decode('utf-8'), msglines)) msg = email.message_from_string(msgtext) if msg.get("Subject") == subject: pop.dele(n+1) pop.quit() return msg.get_payload() pop.close() time.sleep(3) return None
[]
[]
[]
[]
[]
python
null
null
null
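MailHelper.get_mail above retries the POP3 fetch up to five times with a three-second pause before giving up. The generic retry/poll shape of that loop, separated from the POP3 details, looks roughly like this; fetch_once is a hypothetical callable standing in for one POP3 round trip, and the 5-attempt, 3-second schedule is taken from the record.

import time

def poll(fetch_once, attempts=5, delay=3):
    # Call fetch_once until it returns something other than None, or give up.
    for _ in range(attempts):
        result = fetch_once()
        if result is not None:
            return result
        time.sleep(delay)
    return None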
src/faro/face_workers/RankOneFaceWorker.py
''' Created on August 18th 2020 @author: Nisha Srinivas ''' import faro import os import faro.proto.proto_types as pt import faro.proto.face_service_pb2 as fsd import numpy as np import pyvision as pv import time from PIL import Image import json import faro.proto.geometry_pb2 as geo from array import array roc = None def getOptionsGroup(parser): rankone_options = parser.add_option_group("Options for RankOne") rankone_options.add_option("--img-quality", type=float, dest="img_quality",default=None) rankone_options.add_option("--num-faces", type=int, dest="num_faces", default=None) rankone_options.add_option("--min-face-size", dest="min_face_size", default='recommended') class RankOneFaceWorker(faro.FaceWorker): ''' classdocs ''' def __init__(self, options): ''' Constructor ''' ''' Initialize ROC SDK. looks for the license file and optionally we can provide a log file. If it cannot find the license then it will quit. Roc_ensure catches the error and aborts. ''' global roc import roc as _local_roc roc = _local_roc if os.environ.get('ROC_LIC') is not None: roc.roc_ensure(roc.roc_initialize(None,None)) else: self.license_file = (roc.__file__).split('python')[0] + 'ROC.lic' roc.roc_ensure(roc.roc_initialize(self.license_file.encode('utf-8'),None)) print("ROC SDK Initialized") self.img_quality = options.img_quality self.num_faces = options.num_faces self.min_face_size = options.min_face_size self.detection_threshold = self.recommendedDetectionThreshold() if self.img_quality is None: self.img_quality = self.recommendedImgQuality() if self.num_faces is None: self.num_faces = self.recommendedMaxFacesDetected() ''' ROC_Frontal : ROC frontal face detector (-30 to +30 degress yaw) ROC_FR : Represent in-the-wild-faces for comparison Note : Non-frontal faces detected by ROC_FULL and ROC_PARTIAL are not reliable for recognition. Therefore we advise against using ROC_FULL or ROC_PARTIAL in conjunction with ROC_FR or ROC_ID. ROC_FULL : ROC face detector (-100 to +100 degrees yaw) ROC_DEMOGRAPHICS - Return age, gender, sex ROC_PITCHYAW - Returns yaw and pitch ''' self.algorithm_id_detect = roc.ROC_FULL self.algorithm_id_extract = roc.ROC_MANUAL | roc.ROC_FR | roc.ROC_DEMOGRAPHICS | roc.ROC_LANDMARKS | roc.ROC_PITCHYAW roc.roc_ensure(roc.roc_preload(self.algorithm_id_detect)) roc.roc_ensure(roc.roc_preload(self.algorithm_id_extract)) def _converttoRocImage(self,imgarray): #convert to PIL image (This has to be an RGB image) image_pillow = Image.fromarray(imgarray) #conver PIL to roc image image_roc = roc.roc_image() image_roc.width = image_pillow.width image_roc.height = image_pillow.height image_roc.step = 3 * image_pillow.width image_roc.color_space = roc.ROC_BGR24 bytes = 3 * image_pillow.width * image_pillow.height image_roc.data = roc.new_uint8_t_array(bytes + 1) roc.memmove(image_roc.data, image_pillow.tobytes()) #RankOne requires a BGR image roc.roc_ensure(roc.roc_swap_channels(image_roc)) return image_roc def _rocFlatten(self,tmpl): ''' Converts roc template to serialized data. Datatype = bytes ''' buffer_size = roc.new_size_t() #calculates the bytes required to a template roc.roc_flattened_bytes(tmpl, buffer_size) buffer_size_int = roc.size_t_value(buffer_size) roc_buffer_src = roc.new_uint8_t_array(buffer_size_int) roc.roc_flatten(tmpl, roc_buffer_src) native_buffer = roc.cdata(roc_buffer_src, buffer_size_int) roc.delete_size_t(buffer_size) roc.delete_uint8_t_array(roc_buffer_src) return native_buffer def _rocUnFlatten(self, buff, template_dst): ''' Converts serialized data back to roc template. 
''' #template_dst = roc.roc_template() roc_buffer_dst = roc.new_uint8_t_array(len(buff) + 1) roc.memmove(roc_buffer_dst, buff) roc.roc_unflatten(roc_buffer_dst, template_dst) roc.delete_uint8_t_array(roc_buffer_dst) return template_dst def _detect(self,im, opts): ''' In RankOne, face detection happends within the roc_represent function. There is no explicit face detection step like in dlib. But we will output the bounding box. but it is not really useful in this case. ''' ''' Rank one requires the image to be of type roc_image. Hence we will check for the image type. In this case it is a numpy array (skimage imread). Check if the image is a numpy array and if it is then conver it to a PIL image and then to a roc_image. The reason I am doing this is cause rankone provides example code to convert from PIL image to roc_image. ''' h,w,_ = im.shape if isinstance(im,np.ndarray): im = self._converttoRocImage(im) ''' indicates the smalled face to detect Face detection size is measured by the width of the face in pixels. The default value is 36. It roughly correspinds to 18 pixels between the eyes. ''' if self.min_face_size == 'recommended': self.min_face_size = self.recommendedMinFaceSize() elif self.min_face_size == 'adaptive_size': ''' A method for determining the minimum face detection size as a fraction of the image size. In the interest of efficiency, it is recommended to set a lower bound on the minimum face detection size as a fraction of the image size. Given a relative minimum size of 4% of the image dimensions, and an absolute minimum size of 36 pixels, the adaptive minimum size is: max(max(image.width, image.height) * 0.04, 36). Example roc_image image = ...; size_t adaptive_minimum_size; roc_adaptive_minimum_size(image, 0.04, 36, &adaptive_minimum_size); ''' adaptive_minimum_size = new_size_t() roc_ensure(roc_adaptive_minimum_size(im, 0.04, 36, adaptive_minimum_size)) else: self.min_face_size = int(self.min_face_size) self.detection_threshold = opts.threshold if opts.best: self.num_faces = 1 #create a template array templates = roc.new_roc_template_array(self.num_faces) if self.min_face_size != 'adaptive_size': roc.roc_represent(im, self.algorithm_id_detect, self.min_face_size, self.num_faces, self.detection_threshold, self.img_quality, templates) else: roc.roc_represent(im, self.algorithm_id_detect, size_t_value(adaptive_minimum_size), self.num_faces, detection_threshold, self.img_quality, templates) roc.delete_size_t(adaptive_minimum_size) # we don't need to check for best mode here. 
If a failed detection occurs then #create a template by manually specifying the bounding box # fix the missing detection case curr_template = roc.roc_template_array_getitem(templates, 0) if (curr_template.algorithm_id == 0 or curr_template.algorithm_id & roc.ROC_INVALID): curr_template = roc.roc_template_array_getitem(templates, 0) curr_template.detection.x = int(w * 0.5) curr_template.detection.y = int(h * 0.5) curr_template.detection.width = w curr_template.detection.height = h roc.roc_template_array_setitem(templates,0,curr_template) roc.roc_represent(im, roc.ROC_MANUAL, self.min_face_size, 1, self.detection_threshold, self.img_quality, templates) roc.roc_free_image(im) return templates def detect(self,img,face_records,options): detected_templates = self._detect(img,options) for i in range(0,self.num_faces): curr_template = roc.roc_template_array_getitem(detected_templates, i) if curr_template.algorithm_id & roc.ROC_INVALID or curr_template.algorithm_id == 0: continue else: face_record = face_records.face_records.add() face_record.detection.score = curr_template.detection.confidence xc, yc, w, h = curr_template.detection.x, curr_template.detection.y, curr_template.detection.width, curr_template.detection.height x = int(xc - (w*0.5)) y = int(yc - (w*0.5)) face_record.detection.location.CopyFrom(pt.rect_val2proto(x, y, w, h)) face_record.detection.detection_id = i face_record.detection.detection_class = "FACE" face_record.template.buffer = self._rocFlatten(curr_template) #Free all the roc stuff for i in range(0,self.num_faces): roc.roc_free_template(roc.roc_template_array_getitem(detected_templates,i)) def extract(self, img, face_records): if isinstance(img,np.ndarray): im = self._converttoRocImage(img) for face_record in face_records.face_records: template_dst = roc.roc_template() self._rocUnFlatten(face_record.template.buffer, template_dst) roc.roc_represent(im, self.algorithm_id_extract, self.recommendedMinFaceSize(), 1, self.recommendedDetectionThreshold(), self.recommendedImgQuality(), template_dst) if template_dst.algorithm_id & roc.ROC_INVALID or template_dst.algorithm_id == 0: continue else: xc, yc, w, h = template_dst.detection.x, template_dst.detection.y, template_dst.detection.width, template_dst.detection.height x = int(xc - (w*0.5)) y = int(yc - (w*0.5)) assert (face_record.detection.location.x == x), "They have to be equal cause" assert (face_record.detection.location.y == y), "They have to be equal cause" assert (face_record.detection.location.width == w), "They have to be equal cause" assert (face_record.detection.location.height == h), "They have to be equal cause" ''' default metadata fields : ChinX,ChinY, IOD (inter-occular distance), LeftEyeX, LeftEyeY, NoseRootX, NoseRootY, Path, Pose, Quality, RightEyeX, RightEyeY, Roll ''' metadata_info = json.loads(template_dst.md.decode('utf-8')) landmark = face_record.landmarks.add() landmark.landmark_id = 'Nose' landmark.location.x = metadata_info['NoseRootX'] landmark.location.y = metadata_info['NoseRootY'] landmark = face_record.landmarks.add() landmark.landmark_id = 'LeftEye' landmark.location.x = metadata_info['LeftEyeX'] landmark.location.y = metadata_info['LeftEyeY'] landmark = face_record.landmarks.add() landmark.landmark_id = 'RightEye' landmark.location.x = metadata_info['RightEyeX'] landmark.location.y = metadata_info['RightEyeY'] landmark = face_record.landmarks.add() landmark.landmark_id = 'ChinX' landmark.location.x = metadata_info['ChinX'] landmark.location.y = metadata_info['ChinY'] demographic = 
face_record.attributes.add() demographic.key = 'Age' demographic.text = str(metadata_info['Age']) demographic = face_record.attributes.add() demographic.key = 'Gender' demographic.text = metadata_info['Gender'] demographic = face_record.attributes.add() demographic.key = 'GeographicOrigin' demographic.text = metadata_info['GeographicOrigin'] demographic = face_record.attributes.add() demographic.key = 'Emotion' demographic.text = metadata_info['Emotion'] demographic = face_record.attributes.add() demographic.key = 'Artwork' demographic.text = metadata_info['Artwork'] demographic = face_record.attributes.add() demographic.key = 'Yaw' demographic.text = str(metadata_info['Yaw']) face_record.template.buffer = self._rocFlatten(template_dst) roc.roc_ensure(roc.roc_free_template(template_dst)) def locate(self,img,face_records,options): ''' Not needed as we find the location of the eyes, nose and chin during detection and have added it to face records during detection ''' pass def align(self,image,face_records): '''Align the images to a standard size and orientation to allow recognition.''' pass # Not needed for this algorithm. def scoreType(self): '''Return the method used to create a score from the template. By default server computation is required. SCORE_L1, SCORE_L2, SCORE_DOT, SCORE_SERVER ''' return fsd.SERVER def score(self,score_request): '''Compare templates to produce scores.''' score_type = self.scoreType() result = geo.Matrix() # Check that this is a known score type if score_type not in [fsd.SERVER]: raise NotImplementedError("Score type <%s> not implemented."%(score_type,)) # Check to make sure the probe and gallery records are correct if len(score_request.template_probes.templates) == 0: raise ValueError("no probe templates were found in the arguments.") if len(score_request.template_gallery.templates) == 0: raise ValueError("no gallery templates were found in the arguments.") #THIS IS NOT NECESSAY AS WE ARE ALWAYS COPYING THE TEMPLATES AND NOT USING FACE RECORD -> REFER TO #FUNCTION in FaceClient.py ''' if min(len(score_request.face_probes.face_records),len(score_request.template_probes.templates)) != 0: raise ValueError("probes argument cannot have both face_probes and template_probes defined.") if max(len(score_request.face_probes.face_records),len(score_request.template_probes.templates)) == 0: raise ValueError("no probe templates were found in the arguments.") if min(len(score_request.face_gallery.face_records),len(score_request.template_gallery.templates)) != 0: raise ValueError("gallery argument cannot have both face_gallery and template_gallery defined.") if max(len(score_request.face_gallery.face_records),len(score_request.template_gallery.templates)) == 0: raise ValueError("no gallery templates were found in the arguments.") ''' #This is the first attempt at computing similarity scores. This is definitely not the fastest approach. #Also , this is going to be restricted by memory. The whole similarity matrix be be held in memory. 
#So for large datasets this might pose a problem if score_type == fsd.SERVER: #rows = probe images #cols = gallery images sim_mat = np.zeros((len(score_request.template_probes.templates),len(score_request.template_gallery.templates)),dtype=np.float32) roc_probe_template = roc.roc_template() roc_gallery_template = roc.roc_template() #roc_gallery_template_array = roc.new_roc_template_array(len(score_request.template_gallery.templates)) sm_metric = roc.new_roc_similarity() for p in range(0,len(score_request.template_probes.templates)): self._rocUnFlatten(score_request.template_probes.templates[p].buffer,roc_probe_template) #print roc_probe_template for g in range(0,len(score_request.template_gallery.templates)): #print(p,g) #if p == 0: # roc_gallery_template = roc.roc_template() # self._rocUnFlatten(score_request.template_gallery.templates[g].buffer,roc_gallery_template) # roc.roc_template_array_setitem(roc_gallery_template_array,g,roc_gallery_template) #roc_gallery_template = roc.roc_template() self._rocUnFlatten(score_request.template_gallery.templates[g].buffer,roc_gallery_template) #roc.roc_compare_templates(roc_probe_template, roc.roc_template_array_getitem(roc_gallery_template_array,g), sm_metric) roc.roc_compare_templates(roc_probe_template, roc_gallery_template, sm_metric) sim_mat[p,g] = roc.roc_similarity_value(sm_metric) #roc.roc_free_template(roc_gallery_template) roc.delete_roc_similarity(sm_metric) roc.roc_free_template(roc_probe_template) roc.roc_free_template(roc_gallery_template) #for i in range(len(score_request.template_gallery.templates)): #print(i) # roc.roc_ensure(roc.roc_free_template(roc.roc_template_array_getitem(roc_gallery_template_array, i))) else: NotImplementedError("ScoreType %s is not implemented."%(score_type,)) #RankOne returns a similarity score of -1 if it compares with an invalid template #Threfore find all -1's in the matrix and replace it with a 0 sim_mat[sim_mat == -1.0] = 0.0 #converting the simialrity matrix to distance matrix by subtracting with 1 dist_mat = 1.0 - sim_mat # Return the result return pt.matrix_np2proto(dist_mat) def status(self): '''Return a simple status message.''' print("Handeling status request.") status_message = fsd.FaceServiceInfo() status_message.status = fsd.READY status_message.detection_support = True status_message.extract_support = True status_message.score_support = False status_message.score_type = self.scoreType() status_message.algorithm = "RankOne_%s"%(roc.__file__); status_message.detection_threshold = self.recommendedDetectionThreshold() status_message.match_threshold = self.recommendedScoreThreshold() return status_message def recommendedImgQuality(self): return roc.ROC_SUGGESTED_MIN_QUALITY def recommendedDetectionThreshold(self): ''' The false_detection_rate parameter specifies the allowable false positive rate for face detection.The suggested default value for false_detection_rate is 0.02 which corresponds to one false detection in 50 images on the FDDB benchmark. A higher false detection rate will correctly detect more faces at the cost of also incorrectly detecting more non-faces. The accepted range of values for false_detection_rate is between 0 to 1. Values outside this range will be modified to be at the aforementioned bounds automatically. ''' return 0.02 def recommendedMaxFacesDetected(self): return 10 def recommendedMinFaceSize(self): return 32 def recommendedScoreThreshold(self,far=-1): '''Return the method used to create a score from the template. By default server computation is required. 
Should return a recommended score threshold. DLIB recommends a value of 0.6 for the LFW dataset. ''' return 0.60 def cleanexit(self): print('ROC SDK Deinitialized') roc.roc_finalize()
[]
[]
[ "ROC_LIC" ]
[]
["ROC_LIC"]
python
1
0
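The 'adaptive_size' branch in the worker above quotes RankOne's rule for deriving a minimum face-detection size from the image dimensions: max(max(width, height) * 0.04, 36). A small Python sketch of that arithmetic, independent of the ROC SDK bindings:

def adaptive_minimum_size(width, height, relative=0.04, absolute=36):
    # Minimum detection size is 4% of the largest image dimension,
    # but never below the 36-pixel absolute floor.
    return max(int(max(width, height) * relative), absolute)

# Example: a 1920x1080 image gives max(int(1920 * 0.04), 36) = 76 pixels.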
components/PyTorch/pytorch-kfp-components/pytorch_kfp_components/components/minio/executor.py
#!/usr/bin/env/python3 # Copyright (c) Facebook, Inc. and its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Minio Executor Module.""" import os import urllib3 from minio import Minio #pylint: disable=no-name-in-module from pytorch_kfp_components.components.base.base_executor import BaseExecutor from pytorch_kfp_components.types import standard_component_specs class Executor(BaseExecutor): """Minio Executor Class.""" def __init__(self): #pylint: disable=useless-super-delegation super(Executor, self).__init__() #pylint: disable=super-with-arguments def _initiate_minio_client(self, minio_config: dict): #pylint: disable=no-self-use """Initializes the minio client. Args: minio_config : a dict for minio configuration. Returns: client : the minio server client """ minio_host = minio_config["HOST"] access_key = minio_config["ACCESS_KEY"] secret_key = minio_config["SECRET_KEY"] client = Minio( minio_host, access_key=access_key, secret_key=secret_key, secure=False, ) return client def _read_minio_creds(self, endpoint: str): #pylint: disable=no-self-use """Reads the minio credentials. Args: endpoint : minio endpoint url Raises: ValueError : if minio access key and secret keys are missing Returns: minio_config : a dict for minio configuration. """ if "MINIO_ACCESS_KEY" not in os.environ: raise ValueError("Environment variable MINIO_ACCESS_KEY not found") if "MINIO_SECRET_KEY" not in os.environ: raise ValueError("Environment variable MINIO_SECRET_KEY not found") minio_config = { "HOST": endpoint, "ACCESS_KEY": os.environ["MINIO_ACCESS_KEY"], "SECRET_KEY": os.environ["MINIO_SECRET_KEY"], } return minio_config def upload_artifacts_to_minio( #pylint: disable=no-self-use,too-many-arguments self, client: Minio, source: str, destination: str, bucket_name: str, output_dict: dict, ): """Uploads artifacts to minio server. Args: client : Minio client source : source path of artifacts. destination : destination path of artifacts bucket_name : minio bucket name. output_dict : dict of output containing destination paths, source and bucket names Raises: Exception : on MaxRetryError, NewConnectionError, ConnectionError. Returns: output_dict : dict of output containing destination paths, source and bucket names """ print(f"source {source} destination {destination}") try: client.fput_object( bucket_name=bucket_name, file_path=source, object_name=destination, ) output_dict[destination] = { "bucket_name": bucket_name, "source": source, } except ( urllib3.exceptions.MaxRetryError, urllib3.exceptions.NewConnectionError, urllib3.exceptions.ConnectionError, RuntimeError, ) as expection_raised: print(str(expection_raised)) raise Exception(expection_raised) #pylint: disable=raise-missing-from return output_dict def get_fn_args(self, input_dict: dict, exec_properties: dict): #pylint: disable=no-self-use """Extracts the source, bucket_name, folder_name from the input_dict and endpoint from exec_properties. Args: input_dict : a dict of inputs having source, destination etc. 
exec_properties : a dict of execution properties, having minio endpoint. Returns: source : source path of artifacts. bucket_name : name of minio bucket folder_name : name of folder in which artifacts are uploaded. endpoint : minio endpoint url. """ source = input_dict.get(standard_component_specs.MINIO_SOURCE) bucket_name = input_dict.get( standard_component_specs.MINIO_BUCKET_NAME) folder_name = input_dict.get( standard_component_specs.MINIO_DESTINATION) endpoint = exec_properties.get(standard_component_specs.MINIO_ENDPOINT) return source, bucket_name, folder_name, endpoint def Do(self, input_dict: dict, output_dict: dict, exec_properties: dict): #pylint: disable=too-many-locals """Executes the minio upload process. Args: input_dict : a dict of inputs having source, destination etc. output_dict : dict of output containing destination paths, source and bucket names exec_properties : a dict of execution properties, having minio endpoint. Raises: ValueError : for invalid/unknown source path """ source, bucket_name, folder_name, endpoint = self.get_fn_args( input_dict=input_dict, exec_properties=exec_properties) minio_config = self._read_minio_creds(endpoint=endpoint) client = self._initiate_minio_client(minio_config=minio_config) if not os.path.exists(source): raise ValueError("Input path - {} does not exist".format(source)) if os.path.isfile(source): artifact_name = source.split("/")[-1] destination = os.path.join(folder_name, artifact_name) self.upload_artifacts_to_minio( client=client, source=source, destination=destination, bucket_name=bucket_name, output_dict=output_dict, ) elif os.path.isdir(source): for root, dirs, files in os.walk(source): #pylint: disable=unused-variable for file in files: source = os.path.join(root, file) artifact_name = source.split("/")[-1] destination = os.path.join(folder_name, artifact_name) self.upload_artifacts_to_minio( client=client, source=source, destination=destination, bucket_name=bucket_name, output_dict=output_dict, ) else: raise ValueError("Unknown source: {} ".format(source))
[]
[]
[ "MINIO_SECRET_KEY", "MINIO_ACCESS_KEY" ]
[]
["MINIO_SECRET_KEY", "MINIO_ACCESS_KEY"]
python
2
0
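_read_minio_creds above fails fast when the Minio credentials are not exported. A standalone sketch of that check, using only the MINIO_ACCESS_KEY and MINIO_SECRET_KEY names from the record:

import os

def read_minio_creds(endpoint):
    # Require both credential variables before building the config dict.
    for name in ('MINIO_ACCESS_KEY', 'MINIO_SECRET_KEY'):
        if name not in os.environ:
            raise ValueError('Environment variable %s not found' % name)
    return {
        'HOST': endpoint,
        'ACCESS_KEY': os.environ['MINIO_ACCESS_KEY'],
        'SECRET_KEY': os.environ['MINIO_SECRET_KEY'],
    }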
src/runtime/virtcontainers/pkg/rootless/rootless.go
// Copyright (c) 2019 Intel Corporation // // SPDX-License-Identifier: Apache-2.0 // // Copyright 2015-2019 CNI authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package rootless import ( "context" "crypto/rand" "fmt" "os" "path/filepath" "runtime" "strings" "sync" "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/utils" "github.com/containernetworking/plugins/pkg/ns" "github.com/opencontainers/runc/libcontainer/userns" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) var ( // isRootless states whether execution is rootless or not // If nil, rootless is auto-detected isRootless *bool // lock for the initRootless and isRootless variables rLock sync.Mutex // XDG_RUNTIME_DIR defines the base directory relative to // which user-specific non-essential runtime files are stored. rootlessDir = os.Getenv("XDG_RUNTIME_DIR") rootlessLog = logrus.WithFields(logrus.Fields{ "source": "rootless", }) // IsRootless is declared this way for mocking in unit tests IsRootless = isRootlessFunc ) func SetRootless(rootless bool) { isRootless = &rootless } // SetLogger sets up a logger for the rootless pkg func SetLogger(ctx context.Context, logger *logrus.Entry) { fields := rootlessLog.Data rootlessLog = logger.WithFields(fields) } // isRootlessFunc states whether kata is being ran with root or not func isRootlessFunc() bool { rLock.Lock() defer rLock.Unlock() // auto-detect if nil if isRootless == nil { SetRootless(true) // --rootless and --systemd-cgroup options must honoured // but with the current implementation this is not possible // https://github.com/kata-containers/runtime/issues/2412 if os.Geteuid() != 0 { return true } if userns.RunningInUserNS() { return true } SetRootless(false) } return *isRootless } // GetRootlessDir returns the path to the location for rootless // container and sandbox storage func GetRootlessDir() string { return rootlessDir } // Creates a new persistent network namespace and returns an object // representing that namespace, without switching to it func NewNS() (ns.NetNS, error) { nsRunDir := filepath.Join(GetRootlessDir(), "netns") b := make([]byte, 16) _, err := rand.Reader.Read(b) if err != nil { return nil, fmt.Errorf("failed to generate random netns name: %v", err) } // Create the directory for mounting network namespaces // This needs to be a shared mountpoint in case it is mounted in to // other namespaces (containers) err = utils.MkdirAllWithInheritedOwner(nsRunDir, 0755) if err != nil { return nil, err } nsName := fmt.Sprintf("net-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) // create an empty file at the mount point nsPath := filepath.Join(nsRunDir, nsName) mountPointFd, err := os.Create(nsPath) if err != nil { return nil, err } if err := mountPointFd.Close(); err != nil { return nil, err } // Ensure the mount point is cleaned up on errors; if the namespace // was successfully mounted this will have no effect because the file // is in-use defer func() { _ = os.RemoveAll(nsPath) }() var wg sync.WaitGroup wg.Add(1) // do namespace 
work in a dedicated goroutine, so that we can safely // Lock/Unlock OSThread without upsetting the lock/unlock state of // the caller of this function go (func() { defer wg.Done() runtime.LockOSThread() // Don't unlock. By not unlocking, golang will kill the OS thread when the // goroutine is done (for go1.10+) threadNsPath := getCurrentThreadNetNSPath() var origNS ns.NetNS origNS, err = ns.GetNS(threadNsPath) if err != nil { rootlessLog.Warnf("cannot open current network namespace %s: %q", threadNsPath, err) return } defer func() { if err := origNS.Close(); err != nil { rootlessLog.Errorf("unable to close namespace: %q", err) } }() // create a new netns on the current thread err = unix.Unshare(unix.CLONE_NEWNET) if err != nil { rootlessLog.Warnf("cannot create a new network namespace: %q", err) return } // Put this thread back to the orig ns, since it might get reused (pre go1.10) defer func() { if err := origNS.Set(); err != nil { if IsRootless() && strings.Contains(err.Error(), "operation not permitted") { // When running in rootless mode it will fail to re-join // the network namespace owned by root on the host. return } rootlessLog.Warnf("unable to reset namespace: %q", err) } }() // bind mount the netns from the current thread (from /proc) onto the // mount point. This causes the namespace to persist, even when there // are no threads in the ns. err = unix.Mount(threadNsPath, nsPath, "none", unix.MS_BIND, "") if err != nil { err = fmt.Errorf("failed to bind mount ns at %s: %v", nsPath, err) } })() wg.Wait() if err != nil { unix.Unmount(nsPath, unix.MNT_DETACH) return nil, fmt.Errorf("failed to create namespace: %v", err) } return ns.GetNS(nsPath) } // getCurrentThreadNetNSPath copied from pkg/ns func getCurrentThreadNetNSPath() string { // /proc/self/ns/net returns the namespace of the main thread, not // of whatever thread this goroutine is running on. Make sure we // use the thread's net namespace since the thread is switching around return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid()) }
[ "\"XDG_RUNTIME_DIR\"" ]
[]
[ "XDG_RUNTIME_DIR" ]
[]
["XDG_RUNTIME_DIR"]
go
1
0
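The Go package above decides rootless mode from the effective UID and from whether the process runs in a user namespace, and stores rootless state under XDG_RUNTIME_DIR. Below is a rough Python sketch of the same checks; the /proc/self/uid_map heuristic used for the namespace test is an assumption rather than what the record's userns package does internally.

import os

def is_rootless():
    # Rootless if not effective root, or if apparently inside a user
    # namespace (uid_map differs from the initial namespace's identity map).
    if os.geteuid() != 0:
        return True
    try:
        with open('/proc/self/uid_map') as f:
            return f.read().split() != ['0', '0', '4294967295']
    except OSError:
        return False

def rootless_dir():
    # XDG_RUNTIME_DIR is the base directory used for rootless storage.
    return os.getenv('XDG_RUNTIME_DIR', '')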
test/e2e/vcctl/vcctl.go
/* Copyright 2021 The Volcano Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package vcctl import ( "os" "strings" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) var _ = Describe("Test Help option of vcctl cli", func() { It("Command: vcctl --help", func() { var output = ` Usage: vcctl [command] Available Commands: help Help about any command job vcctl command line operation job queue Queue Operations version Print the version information Flags: -h, --help help for vcctl --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) Use "vcctl [command] --help" for more information about a command. ` command := []string{"--help"} cmdOutput := RunCliCommandWithoutKubeConfig(command) exist := strings.Contains(output, cmdOutput) Expect(exist).Should(Equal(true)) }) It("Command: vcctl job --help", func() { var output = ` vcctl command line operation job Usage: vcctl job [command] Available Commands: delete delete a job list list job information resume resume a job run run job by parameters from the command line suspend abort a job view show job information Flags: -h, --help help for job Global Flags: --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) Use "vcctl job [command] --help" for more information about a command. 
` command := []string{"job", "--help"} cmdOutput := RunCliCommandWithoutKubeConfig(command) exist := strings.Contains(output, cmdOutput) Expect(exist).Should(Equal(true)) }) It("Command: vcctl job list --help", func() { kubeConfig := os.Getenv("KUBECONFIG") var output = ` list job information Usage: vcctl job list [flags] Flags: --all-namespaces list jobs in all namespaces -h, --help help for list -k, --kubeconfig string (optional) absolute path to the kubeconfig file (default "` + kubeConfig + `") -s, --master string the address of apiserver -n, --namespace string the namespace of job (default "default") -S, --scheduler string list job with specified scheduler name --selector string fuzzy matching jobName Global Flags: --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) ` command := []string{"job", "list", "--help"} cmdOutput := RunCliCommandWithoutKubeConfig(command) exist := strings.Contains(output, cmdOutput) Expect(exist).Should(Equal(true)) }) It("Command: vcctl job suspend -n {$JobName} --help", func() { kubeConfig := os.Getenv("KUBECONFIG") var output = ` abort a job Usage: vcctl job suspend [flags] Flags: -h, --help help for suspend -k, --kubeconfig string (optional) absolute path to the kubeconfig file (default "` + kubeConfig + `") -s, --master string the address of apiserver -N, --name string the name of job -n, --namespace string the namespace of job (default "default") Global Flags: --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) ` command := []string{"job", "suspend", "-n", "job1", "--help"} cmdOutput := RunCliCommandWithoutKubeConfig(command) exist := strings.Contains(output, cmdOutput) Expect(exist).Should(Equal(true)) }) It("vcctl job resume -n {$JobName} --help", func() { kubeConfig := os.Getenv("KUBECONFIG") var output = ` resume a job Usage: vcctl job resume [flags] Flags: -h, --help help for resume -k, --kubeconfig string (optional) absolute path to the kubeconfig file (default "` + kubeConfig + `") -s, --master string the address of apiserver -N, --name string the name of job -n, --namespace string the namespace of job (default "default") Global Flags: --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) ` command := []string{"job", "resume", "-n", "job1", "--help"} cmdOutput := RunCliCommandWithoutKubeConfig(command) exist := strings.Contains(output, cmdOutput) Expect(exist).Should(Equal(true)) }) It("vcctl job run --help", func() { kubeConfig := os.Getenv("KUBECONFIG") var output = ` run job by parameters from the command line Usage: vcctl job run [flags] Flags: -f, --filename string the yaml file of job -h, --help help for run -i, --image string the container image of job (default "busybox") -k, --kubeconfig string (optional) absolute path to the kubeconfig file (default "` + kubeConfig + `") -L, --limits string the resource limit of the task (default "cpu=1000m,memory=100Mi") -s, --master string the address of apiserver -m, --min int the minimal available tasks of job (default 1) -N, --name string the name of job -n, --namespace string the namespace of job (default "default") -r, --replicas int the total tasks of job (default 1) -R, --requests string the resource request of the task (default "cpu=1000m,memory=100Mi") -S, --scheduler string the scheduler for this job (default "volcano") Global Flags: --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) ` command := []string{"job", "run", "--help"} cmdOutput := 
RunCliCommandWithoutKubeConfig(command) exist := strings.Contains(output, cmdOutput) Expect(exist).Should(Equal(true)) }) })
[ "\"KUBECONFIG\"", "\"KUBECONFIG\"", "\"KUBECONFIG\"", "\"KUBECONFIG\"" ]
[]
[ "KUBECONFIG" ]
[]
["KUBECONFIG"]
go
1
0
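Each spec above builds its expected help text around the caller's KUBECONFIG value and then asserts that the captured CLI output is contained in it. A hypothetical Python equivalent of that check is sketched below; the subprocess invocation and the {kubeconfig} placeholder are illustrative, not part of the record.

import os
import subprocess

def help_matches(expected_template):
    # Substitute the caller's KUBECONFIG into the expected text, run the
    # CLI, and check the captured help output is contained in it.
    expected = expected_template.replace('{kubeconfig}', os.getenv('KUBECONFIG', ''))
    out = subprocess.run(['vcctl', 'job', 'list', '--help'],
                         capture_output=True, text=True).stdout
    return out in expected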
vendor/gopkg.in/src-d/go-vitess.v1/vt/vttablet/tabletmanager/replication_reporter.go
/* Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreedto in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package tabletmanager import ( "flag" "fmt" "html/template" "time" "golang.org/x/net/context" "gopkg.in/src-d/go-vitess.v1/mysql" "gopkg.in/src-d/go-vitess.v1/vt/health" "gopkg.in/src-d/go-vitess.v1/vt/log" "gopkg.in/src-d/go-vitess.v1/vt/mysqlctl" ) var ( enableReplicationReporter = flag.Bool("enable_replication_reporter", false, "Register the health check module that monitors MySQL replication") ) // replicationReporter implements health.Reporter type replicationReporter struct { // set at construction time agent *ActionAgent now func() time.Time // store the last time we successfully got the lag, so if we // can't get the lag any more, we can extrapolate. lastKnownValue time.Duration lastKnownTime time.Time } // Report is part of the health.Reporter interface func (r *replicationReporter) Report(isSlaveType, shouldQueryServiceBeRunning bool) (time.Duration, error) { if !isSlaveType { return 0, nil } status, statusErr := r.agent.MysqlDaemon.SlaveStatus() if statusErr == mysql.ErrNotSlave || (statusErr == nil && !status.SlaveSQLRunning && !status.SlaveIORunning) { // MySQL is up, but slave is either not configured or not running. // Both SQL and IO threads are stopped, so it's probably either // stopped on purpose, or stopped because of a mysqld restart. if !r.agent.slaveStopped() { // As far as we've been told, it isn't stopped on purpose, // so let's try to start it. if *mysqlctl.DisableActiveReparents { log.Infof("Slave is stopped. Running with --disable_active_reparents so will not try to reconnect to master...") } else { log.Infof("Slave is stopped. Trying to reconnect to master...") ctx, cancel := context.WithTimeout(r.agent.batchCtx, 5*time.Second) if err := repairReplication(ctx, r.agent); err != nil { log.Infof("Failed to reconnect to master: %v", err) } cancel() // Check status again. status, statusErr = r.agent.MysqlDaemon.SlaveStatus() } } } if statusErr != nil { // mysqld is not running or slave is not configured. // We can't report healthy. return 0, statusErr } if !status.SlaveRunning() { // mysqld is running, but slave is not replicating (most likely, // replication has been stopped). See if we can extrapolate. if r.lastKnownTime.IsZero() { // we can't. return 0, health.ErrSlaveNotRunning } // we can extrapolate with the worst possible // value (that is we made no replication // progress since last time, and just fell more behind). elapsed := r.now().Sub(r.lastKnownTime) return elapsed + r.lastKnownValue, nil } // we got a real value, save it. r.lastKnownValue = time.Duration(status.SecondsBehindMaster) * time.Second r.lastKnownTime = r.now() return r.lastKnownValue, nil } // HTMLName is part of the health.Reporter interface func (r *replicationReporter) HTMLName() template.HTML { return template.HTML("MySQLReplicationLag") } // repairReplication tries to connect this slave to whoever is // the current master of the shard, and start replicating. 
func repairReplication(ctx context.Context, agent *ActionAgent) error { if *mysqlctl.DisableActiveReparents { return fmt.Errorf("can't repair replication with --disable_active_reparents") } ts := agent.TopoServer tablet := agent.Tablet() si, err := ts.GetShard(ctx, tablet.Keyspace, tablet.Shard) if err != nil { return err } if !si.HasMaster() { return fmt.Errorf("no master tablet for shard %v/%v", tablet.Keyspace, tablet.Shard) } // If Orchestrator is configured and if Orchestrator is actively reparenting, we should not repairReplication if agent.orc != nil { re, err := agent.orc.InActiveShardRecovery(tablet) if err != nil { return err } if re { return fmt.Errorf("Orchestrator actively reparenting shard %v, skipping repairReplication", si) } // Before repairing replication, tell Orchestrator to enter maintenance mode for this tablet and to // lock any other actions on this tablet by Orchestrator. if err := agent.orc.BeginMaintenance(agent.Tablet(), "vttablet has been told to StopSlave"); err != nil { log.Warningf("Orchestrator BeginMaintenance failed: %v", err) return fmt.Errorf("Orchestrator BeginMaintenance failed :%v, skipping repairReplication", err) } } return agent.setMasterRepairReplication(ctx, si.MasterAlias, 0, true) } func registerReplicationReporter(agent *ActionAgent) { if *enableReplicationReporter { health.DefaultAggregator.Register("replication_reporter", &replicationReporter{ agent: agent, now: time.Now, }) } }
[]
[]
[]
[]
[]
go
null
null
null
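The reporter above extrapolates replication lag while the slave is stopped: it assumes no progress has been made since the last good sample, so the reported value is the last known lag plus the wall-clock time elapsed since it was measured. A small Python sketch of that rule (class and method names here are illustrative):

import time

class LagReporter:
    def __init__(self):
        self.last_known_lag = None   # seconds
        self.last_known_time = None  # epoch seconds

    def report(self, current_lag=None):
        # With a fresh sample, record it; without one, extrapolate from the
        # last sample, or fail if replication was never observed running.
        now = time.time()
        if current_lag is not None:
            self.last_known_lag = current_lag
            self.last_known_time = now
            return current_lag
        if self.last_known_time is None:
            raise RuntimeError('replication is not running')
        return self.last_known_lag + (now - self.last_known_time)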
transport/grpc/dial.go
// Copyright 2015 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package grpc supports network connections to GRPC servers. // This package is not intended for use by end developers. Use the // google.golang.org/api/option package to configure API clients. package grpc import ( "context" "errors" "log" "os" "strings" "go.opencensus.io/plugin/ocgrpc" "golang.org/x/oauth2" "google.golang.org/api/internal" "google.golang.org/api/option" "google.golang.org/grpc" "google.golang.org/grpc/credentials" grpcgoogle "google.golang.org/grpc/credentials/google" "google.golang.org/grpc/credentials/oauth" // Install grpclb, which is required for direct path. _ "google.golang.org/grpc/balancer/grpclb" ) // Set at init time by dial_appengine.go. If nil, we're not on App Engine. var appengineDialerHook func(context.Context) grpc.DialOption // Set at init time by dial_socketopt.go. If nil, socketopt is not supported. var timeoutDialerOption grpc.DialOption // Dial returns a GRPC connection for use communicating with a Google cloud // service, configured with the given ClientOptions. func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { return dial(ctx, false, opts) } // DialInsecure returns an insecure GRPC connection for use communicating // with fake or mock Google cloud service implementations, such as emulators. // The connection is configured with the given ClientOptions. func DialInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { return dial(ctx, true, opts) } func dial(ctx context.Context, insecure bool, opts []option.ClientOption) (*grpc.ClientConn, error) { var o internal.DialSettings for _, opt := range opts { opt.Apply(&o) } if err := o.Validate(); err != nil { return nil, err } if o.HTTPClient != nil { return nil, errors.New("unsupported HTTP client specified") } if o.GRPCConn != nil { return o.GRPCConn, nil } var grpcOpts []grpc.DialOption if insecure { grpcOpts = []grpc.DialOption{grpc.WithInsecure()} } else if !o.NoAuth { if o.APIKey != "" { log.Print("API keys are not supported for gRPC APIs. Remove the WithAPIKey option from your client-creating call.") } creds, err := internal.Creds(ctx, &o) if err != nil { return nil, err } if o.QuotaProject == "" { o.QuotaProject = internal.QuotaProjectFromCreds(creds) } // Attempt Direct Path only if: // * The endpoint is a host:port (or dns:///host:port). // * Credentials are obtained via GCE metadata server, using the default // service account. // * Opted in via GOOGLE_CLOUD_ENABLE_DIRECT_PATH environment variable. // For example, GOOGLE_CLOUD_ENABLE_DIRECT_PATH=spanner,pubsub if isDirectPathEnabled(o.Endpoint) && isTokenSourceDirectPathCompatible(creds.TokenSource) { if !strings.HasPrefix(o.Endpoint, "dns:///") { o.Endpoint = "dns:///" + o.Endpoint } grpcOpts = []grpc.DialOption{ grpc.WithCredentialsBundle( grpcgoogle.NewComputeEngineCredentials(), ), } // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor. } else { grpcOpts = []grpc.DialOption{ grpc.WithPerRPCCredentials(grpcTokenSource{ TokenSource: oauth.TokenSource{creds.TokenSource}, quotaProject: o.QuotaProject, requestReason: o.RequestReason, }), grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), } } } if appengineDialerHook != nil { // Use the Socket API on App Engine. 
// appengine dialer will override socketopt dialer grpcOpts = append(grpcOpts, appengineDialerHook(ctx)) } // Add tracing, but before the other options, so that clients can override the // gRPC stats handler. // This assumes that gRPC options are processed in order, left to right. grpcOpts = addOCStatsHandler(grpcOpts, o) grpcOpts = append(grpcOpts, o.GRPCDialOpts...) if o.UserAgent != "" { grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent)) } // TODO(weiranf): This socketopt dialer will be used by default at some // point when isDirectPathEnabled will default to true, we guard it by // the Directpath env var for now once we can introspect user defined // dialer (https://github.com/grpc/grpc-go/issues/2795). if timeoutDialerOption != nil && isDirectPathEnabled(o.Endpoint) { grpcOpts = append(grpcOpts, timeoutDialerOption) } return grpc.DialContext(ctx, o.Endpoint, grpcOpts...) } func addOCStatsHandler(opts []grpc.DialOption, settings internal.DialSettings) []grpc.DialOption { if settings.TelemetryDisabled { return opts } return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) } // grpcTokenSource supplies PerRPCCredentials from an oauth.TokenSource. type grpcTokenSource struct { oauth.TokenSource // Additional metadata attached as headers. quotaProject string requestReason string } // GetRequestMetadata gets the request metadata as a map from a grpcTokenSource. func (ts grpcTokenSource) GetRequestMetadata(ctx context.Context, uri ...string) ( map[string]string, error) { metadata, err := ts.TokenSource.GetRequestMetadata(ctx, uri...) if err != nil { return nil, err } // Attach system parameter if ts.quotaProject != "" { metadata["X-goog-user-project"] = ts.quotaProject } if ts.requestReason != "" { metadata["X-goog-request-reason"] = ts.requestReason } return metadata, nil } func isTokenSourceDirectPathCompatible(ts oauth2.TokenSource) bool { if ts == nil { return false } tok, err := ts.Token() if err != nil { return false } if tok == nil { return false } if source, _ := tok.Extra("oauth2.google.tokenSource").(string); source != "compute-metadata" { return false } if acct, _ := tok.Extra("oauth2.google.serviceAccount").(string); acct != "default" { return false } return true } func isDirectPathEnabled(endpoint string) bool { // Only host:port is supported, not other schemes (e.g., "tcp://" or "unix://"). // Also don't try direct path if the user has chosen an alternate name resolver // (i.e., via ":///" prefix). // // TODO(cbro): once gRPC has introspectible options, check the user hasn't // provided a custom dialer in gRPC options. if strings.Contains(endpoint, "://") && !strings.HasPrefix(endpoint, "dns:///") { return false } // Only try direct path if the user has opted in via the environment variable. whitelist := strings.Split(os.Getenv("GOOGLE_CLOUD_ENABLE_DIRECT_PATH"), ",") for _, api := range whitelist { // Ignore empty string since an empty env variable splits into [""] if api != "" && strings.Contains(endpoint, api) { return true } } return false }
[ "\"GOOGLE_CLOUD_ENABLE_DIRECT_PATH\"" ]
[]
[ "GOOGLE_CLOUD_ENABLE_DIRECT_PATH" ]
[]
["GOOGLE_CLOUD_ENABLE_DIRECT_PATH"]
go
1
0
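isDirectPathEnabled above only opts in when the endpoint is a plain host:port (or dns:///host:port) and matches one of the APIs listed in the comma-separated GOOGLE_CLOUD_ENABLE_DIRECT_PATH variable. A compact Python sketch of that check:

import os

def is_direct_path_enabled(endpoint):
    # Reject alternate resolvers/schemes other than the dns:/// prefix.
    if '://' in endpoint and not endpoint.startswith('dns:///'):
        return False
    # Only opt in when the endpoint matches a non-empty whitelist entry.
    whitelist = os.getenv('GOOGLE_CLOUD_ENABLE_DIRECT_PATH', '').split(',')
    return any(api and api in endpoint for api in whitelist)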
internal/cli/up.go
package cli import ( "context" _ "embed" "errors" "fmt" "log" "net/url" "os" "strconv" "time" "github.com/mattn/go-isatty" "github.com/spf13/cobra" "k8s.io/klog/v2" "github.com/tilt-dev/tilt/internal/analytics" engineanalytics "github.com/tilt-dev/tilt/internal/engine/analytics" "github.com/tilt-dev/tilt/internal/hud/prompt" "github.com/tilt-dev/tilt/internal/store" "github.com/tilt-dev/tilt/internal/store/liveupdates" "github.com/tilt-dev/tilt/pkg/assets" "github.com/tilt-dev/tilt/pkg/logger" "github.com/tilt-dev/tilt/pkg/model" "github.com/tilt-dev/tilt/web" ) var webModeFlag model.WebMode = model.DefaultWebMode const DefaultWebDevPort = 46764 var updateModeFlag string = string(liveupdates.UpdateModeAuto) var webDevPort = 0 var logActionsFlag bool = false var userExitError = errors.New("user requested Tilt exit") //go:embed Tiltfile.starter var starterTiltfile []byte type upCmd struct { fileName string outputSnapshotOnExit string legacy bool stream bool } func (c *upCmd) name() model.TiltSubcommand { return "up" } func (c *upCmd) register() *cobra.Command { cmd := &cobra.Command{ Use: "up [<tilt flags>] [-- <Tiltfile args>]", DisableFlagsInUseLine: true, Short: "Start Tilt with the given Tiltfile args", Long: ` Starts Tilt and runs services defined in the Tiltfile. There are two types of args: 1) Tilt flags, listed below, which are handled entirely by Tilt. 2) Tiltfile args, which can be anything, and are potentially accessed by config.parse in your Tiltfile. By default: 1) Tiltfile args are interpreted as the list of services to start, e.g. tilt up frontend backend. 2) Running with no Tiltfile args starts all services defined in the Tiltfile This default behavior does not apply if the Tiltfile uses config.parse or config.set_enabled_resources. In that case, see https://tilt.dev/user_config.html and/or comments in your Tiltfile When you exit Tilt (using Ctrl+C), Kubernetes resources and Docker Compose resources continue running; you can use tilt down (https://docs.tilt.dev/cli/tilt_down.html) to delete these resources. Any long-running local resources--i.e. those using serve_cmd--are terminated when you exit Tilt. `, } cmd.Flags().StringVar(&updateModeFlag, "update-mode", string(liveupdates.UpdateModeAuto), fmt.Sprintf("Control the strategy Tilt uses for updating instances. 
Possible values: %v", liveupdates.AllUpdateModes)) cmd.Flags().BoolVar(&c.legacy, "legacy", false, "If true, tilt will open in legacy terminal mode.") cmd.Flags().BoolVar(&c.stream, "stream", false, "If true, tilt will stream logs in the terminal.") cmd.Flags().BoolVar(&logActionsFlag, "logactions", false, "log all actions and state changes") addStartServerFlags(cmd) addDevServerFlags(cmd) addTiltfileFlag(cmd, &c.fileName) addKubeContextFlag(cmd) addNamespaceFlag(cmd) cmd.Flags().Lookup("logactions").Hidden = true cmd.Flags().StringVar(&c.outputSnapshotOnExit, "output-snapshot-on-exit", "", "If specified, Tilt will dump a snapshot of its state to the specified path when it exits") return cmd } func (c *upCmd) initialTermMode(isTerminal bool) store.TerminalMode { if !isTerminal { return store.TerminalModeStream } if c.legacy { return store.TerminalModeHUD } if c.stream { return store.TerminalModeStream } return store.TerminalModePrompt } func (c *upCmd) run(ctx context.Context, args []string) error { ctx, cancel := context.WithCancel(ctx) defer cancel() a := analytics.Get(ctx) defer a.Flush(time.Second) log.SetFlags(log.Flags() &^ (log.Ldate | log.Ltime)) isTTY := isatty.IsTerminal(os.Stdout.Fd()) termMode := c.initialTermMode(isTTY) cmdUpTags := engineanalytics.CmdTags(map[string]string{ "update_mode": updateModeFlag, // before 7/8/20 this was just called "mode" "term_mode": strconv.Itoa(int(termMode)), }) generateTiltfileResult, err := maybeGenerateTiltfile(c.fileName) // N.B. report the command before handling the error; result enum is always valid cmdUpTags["generate_tiltfile.result"] = string(generateTiltfileResult) a.Incr("cmd.up", cmdUpTags.AsMap()) if err == userExitError { return nil } else if err != nil { return err } deferred := logger.NewDeferredLogger(ctx) ctx = redirectLogs(ctx, deferred) webHost := provideWebHost() webURL, _ := provideWebURL(webHost, provideWebPort()) startLine := prompt.StartStatusLine(webURL, webHost) log.Print(startLine) log.Print(buildStamp()) if ok, reason := analytics.IsAnalyticsDisabledFromEnv(); ok { log.Printf("Tilt analytics disabled: %s", reason) } cmdUpDeps, err := wireCmdUp(ctx, a, cmdUpTags, "up") if err != nil { deferred.SetOutput(deferred.Original()) return err } upper := cmdUpDeps.Upper if termMode == store.TerminalModePrompt { // Any logs that showed up during initialization, make sure they're // in the prompt. 
cmdUpDeps.Prompt.SetInitOutput(deferred.CopyBuffered(logger.InfoLvl)) } l := store.NewLogActionLogger(ctx, upper.Dispatch) deferred.SetOutput(l) ctx = redirectLogs(ctx, l) if c.outputSnapshotOnExit != "" { defer cmdUpDeps.Snapshotter.WriteSnapshot(ctx, c.outputSnapshotOnExit) } err = upper.Start(ctx, args, cmdUpDeps.TiltBuild, c.fileName, termMode, a.UserOpt(), cmdUpDeps.Token, string(cmdUpDeps.CloudAddress)) if err != context.Canceled { return err } else { return nil } } func redirectLogs(ctx context.Context, l logger.Logger) context.Context { ctx = logger.WithLogger(ctx, l) log.SetOutput(l.Writer(logger.InfoLvl)) klog.SetOutput(l.Writer(logger.InfoLvl)) return ctx } func provideUpdateModeFlag() liveupdates.UpdateModeFlag { return liveupdates.UpdateModeFlag(updateModeFlag) } func provideLogActions() store.LogActionsFlag { return store.LogActionsFlag(logActionsFlag) } func provideWebMode(b model.TiltBuild) (model.WebMode, error) { switch webModeFlag { case model.LocalWebMode, model.ProdWebMode, model.EmbeddedWebMode, model.CloudWebMode, model.PrecompiledWebMode: return webModeFlag, nil case model.DefaultWebMode: // Set prod web mode from an environment variable. Useful for // running integration tests against dev tilt. webMode := os.Getenv("TILT_WEB_MODE") if webMode == "prod" { return model.ProdWebMode, nil } if b.Dev { return model.LocalWebMode, nil } else { return model.ProdWebMode, nil } } return "", model.UnrecognizedWebModeError(string(webModeFlag)) } func provideWebHost() model.WebHost { return model.WebHost(webHostFlag) } func provideWebPort() model.WebPort { return model.WebPort(webPortFlag) } func provideWebURL(webHost model.WebHost, webPort model.WebPort) (model.WebURL, error) { if webPort == 0 { return model.WebURL{}, nil } if webHost == "0.0.0.0" { // 0.0.0.0 means "listen on all hosts" // For UI displays, we use 127.0.0.1 (loopback) webHost = "127.0.0.1" } u, err := url.Parse(fmt.Sprintf("http://%s:%d/", webHost, webPort)) if err != nil { return model.WebURL{}, err } return model.WebURL(*u), nil } func targetMode(mode model.WebMode, embeddedAvailable bool) (model.WebMode, error) { if (mode == model.EmbeddedWebMode || mode == model.PrecompiledWebMode) && !embeddedAvailable { return mode, fmt.Errorf("requested %s mode, but assets are not available", string(mode)) } if mode.IsProd() { // cloud by request, embedded when available, otherwise cloud if mode != model.CloudWebMode && embeddedAvailable { mode = model.EmbeddedWebMode } else if mode == model.ProdWebMode { mode = model.CloudWebMode } } else { // precompiled when available and by request, otherwise local if mode != model.PrecompiledWebMode { mode = model.LocalWebMode } } return mode, nil } func provideAssetServer(mode model.WebMode, version model.WebVersion) (assets.Server, error) { s, ok := assets.GetEmbeddedServer() m, err := targetMode(mode, ok) if err != nil { return nil, err } switch m { case model.EmbeddedWebMode, model.PrecompiledWebMode: return s, nil case model.CloudWebMode: return assets.NewProdServer(assets.ProdAssetBucket, version) case model.LocalWebMode: path, err := web.StaticPath() if err != nil { return nil, err } pkgDir := assets.PackageDir(path) return assets.NewDevServer(pkgDir, model.WebDevPort(webDevPort)) } return nil, model.UnrecognizedWebModeError(string(mode)) }
[ "\"TILT_WEB_MODE\"" ]
[]
[ "TILT_WEB_MODE" ]
[]
["TILT_WEB_MODE"]
go
1
0
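As a quick illustration of the single variable extracted here, the sketch below shows the TILT_WEB_MODE override pattern from provideWebMode: the env var wins, otherwise dev builds fall back to local mode and everything else to prod. This is an assumption-laden sketch, not Tilt code; webMode and resolveWebMode are stand-ins for Tilt's model.WebMode and the real provider.

// Sketch only: env-var override of a default mode, as in provideWebMode above.
package main

import (
	"fmt"
	"os"
)

type webMode string

const (
	localWebMode webMode = "local" // stand-in for model.LocalWebMode
	prodWebMode  webMode = "prod"  // stand-in for model.ProdWebMode
)

// resolveWebMode is a hypothetical helper mirroring the record's logic.
func resolveWebMode(devBuild bool) webMode {
	if os.Getenv("TILT_WEB_MODE") == "prod" {
		return prodWebMode
	}
	if devBuild {
		return localWebMode
	}
	return prodWebMode
}

func main() {
	os.Setenv("TILT_WEB_MODE", "prod")
	fmt.Println(resolveWebMode(true)) // "prod": the env var overrides the dev default
}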
translator/translate/agent/agent_test.go
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: MIT package agent import ( "encoding/json" "testing" "github.com/aws/amazon-cloudwatch-agent/logger" "github.com/aws/amazon-cloudwatch-agent/translator" "github.com/aws/amazon-cloudwatch-agent/translator/config" "os" "github.com/stretchr/testify/assert" ) var httpProxy string var httpsProxy string var noProxy string func TestAgentDefaultConfig(t *testing.T) { agentDefaultConfig(t, config.OS_TYPE_LINUX) agentDefaultConfig(t, config.OS_TYPE_DARWIN) } func agentDefaultConfig(t *testing.T, osType string) { a := new(Agent) translator.SetTargetPlatform(osType) var input interface{} e := json.Unmarshal([]byte(`{"agent":{"metrics_collection_interval":59, "region": "us-west-2"}}`), &input) if e != nil { assert.Fail(t, e.Error()) } _, val := a.ApplyRule(input) agent := map[string]interface{}{ "debug": false, "flush_interval": "10s", "flush_jitter": "0s", "hostname": "", "interval": "59s", "logfile": Linux_Darwin_Default_Log_Dir, "metric_batch_size": 1000, "metric_buffer_limit": 10000, "omit_hostname": false, "precision": "", "quiet": false, "round_interval": false, "collection_jitter": "0s", "logtarget": "lumberjack", } assert.Equal(t, agent, val, "Expect to be equal") } func TestAgentSpecificConfig(t *testing.T) { agentSpecificConfig(t, config.OS_TYPE_LINUX) agentSpecificConfig(t, config.OS_TYPE_DARWIN) } func agentSpecificConfig(t *testing.T, osType string) { translator.SetTargetPlatform(osType) a := new(Agent) var input interface{} e := json.Unmarshal([]byte(`{"agent":{"debug":true, "region": "us-west-2"}}`), &input) if e != nil { assert.Fail(t, e.Error()) } _, val := a.ApplyRule(input) agent := map[string]interface{}{ "debug": true, "flush_interval": "10s", "flush_jitter": "0s", "hostname": "", "interval": "60s", "logfile": Linux_Darwin_Default_Log_Dir, "logtarget": logger.LogTargetLumberjack, "metric_batch_size": 1000, "metric_buffer_limit": 10000, "omit_hostname": false, "precision": "", "quiet": false, "round_interval": false, "collection_jitter": "0s", } assert.Equal(t, agent, val, "Expect to be equal") } func TestNoAgentConfig(t *testing.T) { noAgentConfig(t, config.OS_TYPE_LINUX) noAgentConfig(t, config.OS_TYPE_DARWIN) } func noAgentConfig(t *testing.T, osType string) { translator.SetTargetPlatform(osType) a := new(Agent) var input interface{} e := json.Unmarshal([]byte(`{"agent":{"region": "us-west-2"}}`), &input) if e != nil { assert.Fail(t, e.Error()) } _, val := a.ApplyRule(input) agent := map[string]interface{}{ "debug": false, "flush_interval": "10s", "flush_jitter": "0s", "hostname": "", "interval": "60s", "logfile": Linux_Darwin_Default_Log_Dir, "logtarget": logger.LogTargetLumberjack, "metric_batch_size": 1000, "metric_buffer_limit": 10000, "omit_hostname": false, "precision": "", "quiet": false, "round_interval": false, "collection_jitter": "0s", } assert.Equal(t, agent, val, "Expect to be equal") } func TestInternal(t *testing.T) { internal(t, config.OS_TYPE_LINUX) internal(t, config.OS_TYPE_DARWIN) } func internal(t *testing.T, osType string) { a := new(Agent) translator.SetTargetPlatform(osType) var input interface{} e := json.Unmarshal([]byte(`{"agent":{"internal": true}}`), &input) if e != nil { assert.Fail(t, e.Error()) } agent := map[string]interface{}{ "debug": false, "flush_interval": "10s", "flush_jitter": "0s", "hostname": "", "interval": "60s", "logfile": Linux_Darwin_Default_Log_Dir, "logtarget": logger.LogTargetLumberjack, "metric_batch_size": 1000, 
"metric_buffer_limit": 10000, "omit_hostname": false, "precision": "", "quiet": false, "round_interval": false, "collection_jitter": "0s", } _, val := a.ApplyRule(input) assert.Equal(t, agent, val, "Expect to be equal") assert.True(t, Global_Config.Internal) e = json.Unmarshal([]byte(`{"agent":{"internal": false}}`), &input) if e != nil { assert.Fail(t, e.Error()) } _, val = a.ApplyRule(input) assert.Equal(t, agent, val, "Expect to be equal") assert.False(t, Global_Config.Internal) } func saveProxyEnv() { httpProxy = os.Getenv("http_proxy") httpsProxy = os.Getenv("https_proxy") noProxy = os.Getenv("no_proxy") } func restoreProxyEnv() { os.Setenv("http_proxy", httpProxy) os.Setenv("https_proxy", httpsProxy) os.Setenv("no_proxy", noProxy) }
[ "\"http_proxy\"", "\"https_proxy\"", "\"no_proxy\"" ]
[]
[ "http_proxy", "no_proxy", "https_proxy" ]
[]
["http_proxy", "no_proxy", "https_proxy"]
go
3
0
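The three variables in this record come from the test file's saveProxyEnv/restoreProxyEnv pair. A minimal sketch of that save/restore pattern follows, reworked as a helper that returns a restore function; withProxyEnv is a hypothetical name and not part of the agent's code.

// Sketch only: save, override, and restore the proxy env vars listed above.
package main

import (
	"fmt"
	"os"
)

func withProxyEnv(httpProxy, httpsProxy, noProxy string) (restore func()) {
	// Capture current values so the caller can undo the override.
	saved := map[string]string{
		"http_proxy":  os.Getenv("http_proxy"),
		"https_proxy": os.Getenv("https_proxy"),
		"no_proxy":    os.Getenv("no_proxy"),
	}
	os.Setenv("http_proxy", httpProxy)
	os.Setenv("https_proxy", httpsProxy)
	os.Setenv("no_proxy", noProxy)
	return func() {
		for k, v := range saved {
			os.Setenv(k, v)
		}
	}
}

func main() {
	restore := withProxyEnv("http://proxy:3128", "http://proxy:3128", "localhost")
	defer restore()
	fmt.Println(os.Getenv("http_proxy")) // http://proxy:3128
}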
feconf.py
# coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Stores various configuration options and constants for Oppia.""" from __future__ import absolute_import # pylint: disable=import-only-modules from __future__ import unicode_literals # pylint: disable=import-only-modules import copy import datetime import os from constants import constants from typing import Dict, Text # isort:skip # pylint: disable=unused-import # The datastore model ID for the list of featured activity references. This # value should not be changed. ACTIVITY_REFERENCE_LIST_FEATURED = 'featured' ALL_ACTIVITY_REFERENCE_LIST_TYPES = [ACTIVITY_REFERENCE_LIST_FEATURED] # The values which a post_commit_status can have: public, private. POST_COMMIT_STATUS_PUBLIC = 'public' POST_COMMIT_STATUS_PRIVATE = 'private' # Whether to unconditionally log info messages. DEBUG = False # When DEV_MODE is true check that we are running in development environment. # The SERVER_SOFTWARE environment variable does not exist in Travis, hence the # need for an explicit check. if (constants.DEV_MODE and os.getenv('SERVER_SOFTWARE') and not os.getenv('SERVER_SOFTWARE', default='').startswith('Development')): raise Exception('DEV_MODE can\'t be true on production.') CLASSIFIERS_DIR = os.path.join('extensions', 'classifiers') TESTS_DATA_DIR = os.path.join('core', 'tests', 'data') SAMPLE_EXPLORATIONS_DIR = os.path.join('data', 'explorations') SAMPLE_COLLECTIONS_DIR = os.path.join('data', 'collections') CONTENT_VALIDATION_DIR = os.path.join('core', 'domain') # backend_prod_files contain processed JS and HTML files that are served by # Jinja, we are moving away from Jinja so this folder might not be needed later # (#6964) EXTENSIONS_DIR_PREFIX = ( 'backend_prod_files' if not constants.DEV_MODE else '') ACTIONS_DIR = ( os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'actions')) ISSUES_DIR = ( os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'issues')) INTERACTIONS_DIR = ( os.path.join('extensions', 'interactions')) INTERACTIONS_LEGACY_SPECS_FILE_DIR = ( os.path.join(INTERACTIONS_DIR, 'legacy_interaction_specs_by_state_version')) INTERACTIONS_SPECS_FILE_PATH = ( os.path.join(INTERACTIONS_DIR, 'interaction_specs.json')) RTE_EXTENSIONS_DIR = ( os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'rich_text_components')) RTE_EXTENSIONS_DEFINITIONS_PATH = ( os.path.join('assets', 'rich_text_components_definitions.ts')) OBJECT_TEMPLATES_DIR = os.path.join('extensions', 'objects', 'templates') # Choose production templates folder when we are in production mode. 
FRONTEND_TEMPLATES_DIR = ( os.path.join('webpack_bundles') if constants.DEV_MODE else os.path.join('backend_prod_files', 'webpack_bundles')) DEPENDENCIES_TEMPLATES_DIR = ( os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'dependencies')) VALUE_GENERATORS_DIR_FOR_JS = os.path.join( 'local_compiled_js', 'extensions', 'value_generators') VALUE_GENERATORS_DIR = os.path.join('extensions', 'value_generators') VISUALIZATIONS_DIR = os.path.join( 'extensions', 'visualizations') VISUALIZATIONS_DIR_FOR_JS = os.path.join( 'local_compiled_js', 'extensions', 'visualizations') OBJECT_DEFAULT_VALUES_FILE_PATH = os.path.join( 'extensions', 'objects', 'object_defaults.json') RULES_DESCRIPTIONS_FILE_PATH = os.path.join( os.getcwd(), 'extensions', 'interactions', 'rule_templates.json') HTML_FIELD_TYPES_TO_RULE_SPECS_FILE_PATH = os.path.join( os.getcwd(), 'extensions', 'interactions', 'html_field_types_to_rule_specs.json') LEGACY_HTML_FIELD_TYPES_TO_RULE_SPECS_FILE_PATH_FILE_DIR = os.path.join( os.getcwd(), 'extensions', 'interactions', 'legacy_html_field_types_to_rule_specs_by_state_version') # A mapping of interaction ids to classifier properties. # TODO(#10217): As of now we support only one algorithm per interaction. # However, we do have the necessary storage infrastructure to support multiple # algorithms per interaction. Hence, whenever we find a secondary algorithm # candidate for any of the supported interactions, the logical functions to # support multiple algorithms need to be implemented. INTERACTION_CLASSIFIER_MAPPING = { 'TextInput': { 'algorithm_id': 'TextClassifier', 'algorithm_version': 1 }, } # Classifier job time to live (in mins). CLASSIFIER_JOB_TTL_MINS = 5 TRAINING_JOB_STATUS_COMPLETE = 'COMPLETE' TRAINING_JOB_STATUS_FAILED = 'FAILED' TRAINING_JOB_STATUS_NEW = 'NEW' TRAINING_JOB_STATUS_PENDING = 'PENDING' ALLOWED_TRAINING_JOB_STATUSES = [ TRAINING_JOB_STATUS_COMPLETE, TRAINING_JOB_STATUS_FAILED, TRAINING_JOB_STATUS_NEW, TRAINING_JOB_STATUS_PENDING ] # Allowed formats of how HTML is present in rule specs. HTML_RULE_VARIABLE_FORMAT_SET = 'set' HTML_RULE_VARIABLE_FORMAT_STRING = 'string' HTML_RULE_VARIABLE_FORMAT_LIST_OF_SETS = 'listOfSets' ALLOWED_HTML_RULE_VARIABLE_FORMATS = [ HTML_RULE_VARIABLE_FORMAT_SET, HTML_RULE_VARIABLE_FORMAT_STRING, HTML_RULE_VARIABLE_FORMAT_LIST_OF_SETS ] ANSWER_TYPE_LIST_OF_SETS_OF_HTML = 'ListOfSetsOfHtmlStrings' ANSWER_TYPE_SET_OF_HTML = 'SetOfHtmlString' # The maximum number of characters allowed for userbio length. MAX_BIO_LENGTH_IN_CHARS = 2000 ALLOWED_TRAINING_JOB_STATUS_CHANGES = { TRAINING_JOB_STATUS_COMPLETE: [], TRAINING_JOB_STATUS_NEW: [TRAINING_JOB_STATUS_PENDING], TRAINING_JOB_STATUS_PENDING: [TRAINING_JOB_STATUS_COMPLETE, TRAINING_JOB_STATUS_FAILED], TRAINING_JOB_STATUS_FAILED: [TRAINING_JOB_STATUS_NEW] } # Allowed formats of how HTML is present in rule specs. 
HTML_RULE_VARIABLE_FORMAT_SET = 'set' HTML_RULE_VARIABLE_FORMAT_STRING = 'string' HTML_RULE_VARIABLE_FORMAT_LIST_OF_SETS = 'listOfSets' ALLOWED_HTML_RULE_VARIABLE_FORMATS = [ HTML_RULE_VARIABLE_FORMAT_SET, HTML_RULE_VARIABLE_FORMAT_STRING, HTML_RULE_VARIABLE_FORMAT_LIST_OF_SETS ] ANSWER_TYPE_LIST_OF_SETS_OF_HTML = 'ListOfSetsOfHtmlStrings' ANSWER_TYPE_SET_OF_HTML = 'SetOfHtmlString' ENTITY_TYPE_BLOG_POST = 'blog_post' ENTITY_TYPE_EXPLORATION = 'exploration' ENTITY_TYPE_TOPIC = 'topic' ENTITY_TYPE_SKILL = 'skill' ENTITY_TYPE_STORY = 'story' ENTITY_TYPE_QUESTION = 'question' ENTITY_TYPE_VOICEOVER_APPLICATION = 'voiceover_application' IMAGE_CONTEXT_QUESTION_SUGGESTIONS = 'question_suggestions' IMAGE_CONTEXT_EXPLORATION_SUGGESTIONS = 'exploration_suggestions' MAX_TASK_MODELS_PER_FETCH = 25 MAX_TASK_MODELS_PER_HISTORY_PAGE = 10 PERIOD_TO_HARD_DELETE_MODELS_MARKED_AS_DELETED = datetime.timedelta(weeks=8) PERIOD_TO_MARK_MODELS_AS_DELETED = datetime.timedelta(weeks=4) # The maximum number of activities allowed in the playlist of the learner. This # limit applies to both the explorations playlist and the collections playlist. MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT = 10 # The maximum number of goals allowed in the learner goals of the learner. MAX_CURRENT_GOALS_COUNT = 5 # The minimum number of training samples required for training a classifier. MIN_TOTAL_TRAINING_EXAMPLES = 50 # The minimum number of assigned labels required for training a classifier. MIN_ASSIGNED_LABELS = 2 # Default label for classification algorithms. DEFAULT_CLASSIFIER_LABEL = '_default' # The maximum number of results to retrieve in a datastore query. DEFAULT_QUERY_LIMIT = 1000 # The maximum number of results to retrieve in a datastore query # for top rated published explorations in /library page. NUMBER_OF_TOP_RATED_EXPLORATIONS_FOR_LIBRARY_PAGE = 8 # The maximum number of results to retrieve in a datastore query # for recently published explorations in /library page. RECENTLY_PUBLISHED_QUERY_LIMIT_FOR_LIBRARY_PAGE = 8 # The maximum number of results to retrieve in a datastore query # for top rated published explorations in /library/top_rated page. NUMBER_OF_TOP_RATED_EXPLORATIONS_FULL_PAGE = 20 # The maximum number of results to retrieve in a datastore query # for recently published explorations in /library/recently_published page. RECENTLY_PUBLISHED_QUERY_LIMIT_FULL_PAGE = 20 # The maximum number of days a feedback report can be saved in storage before it # must be scrubbed. APP_FEEDBACK_REPORT_MAXIMUM_DAYS = datetime.timedelta(days=90) # The minimum version of the Android feedback report info blob schema. MINIMUM_ANDROID_REPORT_SCHEMA_VERSION = 1 # The current version of the Android feedback report info blob schema. CURRENT_ANDROID_REPORT_SCHEMA_VERSION = 1 # The current version of the web feedback report info blob schema. MINIMUM_WEB_REPORT_SCHEMA_VERSION = 1 # The current version of the web feedback report info blob schema. CURRENT_WEB_REPORT_SCHEMA_VERSION = 1 # The current version of the app feedback report daily stats blob schema. CURRENT_FEEDBACK_REPORT_STATS_SCHEMA_VERSION = 1 # The minimum version of the app feedback report daily stats blob schema. MINIMUM_FEEDBACK_REPORT_STATS_SCHEMA_VERSION = 1 # The current version of the dashboard stats blob schema. If any backward- # incompatible changes are made to the stats blob schema in the data store, # this version number must be changed. CURRENT_DASHBOARD_STATS_SCHEMA_VERSION = 1 # The earliest supported version of the exploration states blob schema. 
EARLIEST_SUPPORTED_STATE_SCHEMA_VERSION = 41 # The current version of the exploration states blob schema. If any backward- # incompatible changes are made to the states blob schema in the data store, # this version number must be changed and the exploration migration job # executed. CURRENT_STATE_SCHEMA_VERSION = 45 # The current version of the all collection blob schemas (such as the nodes # structure within the Collection domain object). If any backward-incompatible # changes are made to any of the blob schemas in the data store, this version # number must be changed. CURRENT_COLLECTION_SCHEMA_VERSION = 6 # The current version of story contents dict in the story schema. CURRENT_STORY_CONTENTS_SCHEMA_VERSION = 5 # The current version of skill contents dict in the skill schema. CURRENT_SKILL_CONTENTS_SCHEMA_VERSION = 2 # The current version of misconceptions dict in the skill schema. CURRENT_MISCONCEPTIONS_SCHEMA_VERSION = 3 # The current version of rubric dict in the skill schema. CURRENT_RUBRIC_SCHEMA_VERSION = 3 # The current version of subtopics dict in the topic schema. CURRENT_SUBTOPIC_SCHEMA_VERSION = 4 # The current version of story reference dict in the topic schema. CURRENT_STORY_REFERENCE_SCHEMA_VERSION = 1 # The current version of page_contents dict in the subtopic page schema. CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION = 2 # This value should be updated in the event of any # StateAnswersModel.submitted_answer_list schema change. CURRENT_STATE_ANSWERS_SCHEMA_VERSION = 1 # This value should be updated if the schema of LearnerAnswerInfo # dict schema changes. CURRENT_LEARNER_ANSWER_INFO_SCHEMA_VERSION = 1 # This value should be updated if the schema of PlatformParameterRule dict # schema changes. CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION = 1 # The default number of exploration tiles to load at a time in the search # results page. SEARCH_RESULTS_PAGE_SIZE = 20 # The default number of commits to show on a page in the exploration history # tab. COMMIT_LIST_PAGE_SIZE = 50 # The default number of items to show on a page in the exploration feedback # tab. FEEDBACK_TAB_PAGE_SIZE = 20 # The maximum number of top unresolved answers which should be aggregated # from all of the submitted answers. TOP_UNRESOLVED_ANSWERS_LIMIT = 20 # Default title for a newly-minted exploration. DEFAULT_EXPLORATION_TITLE = '' # Default category for a newly-minted exploration. DEFAULT_EXPLORATION_CATEGORY = '' # Default objective for a newly-minted exploration. DEFAULT_EXPLORATION_OBJECTIVE = '' # NOTE TO DEVELOPERS: If any of the 5 constants below are modified, the # corresponding field in NEW_STATE_TEMPLATE in constants.js also has to be # modified. # Default name for the initial state of an exploration. DEFAULT_INIT_STATE_NAME = 'Introduction' # Default content id for the state's content. DEFAULT_NEW_STATE_CONTENT_ID = 'content' # Default content id for the interaction's default outcome. DEFAULT_OUTCOME_CONTENT_ID = 'default_outcome' # Default content id for the explanation in the concept card of a skill. DEFAULT_EXPLANATION_CONTENT_ID = 'explanation' # Content id assigned to rule inputs that do not match any interaction # customization argument choices. INVALID_CONTENT_ID = 'invalid_content_id' # Default recorded_voiceovers dict for a default state template. DEFAULT_RECORDED_VOICEOVERS = { 'voiceovers_mapping': { 'content': {}, 'default_outcome': {} } } # type: Dict[Text, Dict[Text, Dict[Text, Text]]] # Default written_translations dict for a default state template. 
DEFAULT_WRITTEN_TRANSLATIONS = { 'translations_mapping': { 'content': {}, 'default_outcome': {} } } # type: Dict[Text, Dict[Text, Dict[Text, Text]]] # The default content text for the initial state of an exploration. DEFAULT_INIT_STATE_CONTENT_STR = '' # Whether new explorations should have automatic text-to-speech enabled # by default. DEFAULT_AUTO_TTS_ENABLED = True # Default title for a newly-minted collection. DEFAULT_COLLECTION_TITLE = '' # Default category for a newly-minted collection. DEFAULT_COLLECTION_CATEGORY = '' # Default objective for a newly-minted collection. DEFAULT_COLLECTION_OBJECTIVE = '' # Default description for a newly-minted story. DEFAULT_STORY_DESCRIPTION = '' # Default notes for a newly-minted story. DEFAULT_STORY_NOTES = '' # Default explanation for a newly-minted skill. DEFAULT_SKILL_EXPLANATION = '' # Default name for a newly-minted misconception. DEFAULT_MISCONCEPTION_NAME = '' # Default notes for a newly-minted misconception. DEFAULT_MISCONCEPTION_NOTES = '' # Default feedback for a newly-minted misconception. DEFAULT_MISCONCEPTION_FEEDBACK = '' # Default content_id for explanation subtitled html. DEFAULT_SKILL_EXPLANATION_CONTENT_ID = 'explanation' # Default description for a newly-minted topic. DEFAULT_TOPIC_DESCRIPTION = '' # Default abbreviated name for a newly-minted topic. DEFAULT_ABBREVIATED_TOPIC_NAME = '' # Default content id for the subtopic page's content. DEFAULT_SUBTOPIC_PAGE_CONTENT_ID = 'content' # Default ID of VM which is used for training classifier. DEFAULT_VM_ID = 'vm_default' # Shared secret key for default VM. DEFAULT_VM_SHARED_SECRET = '1a2b3c4e' IMAGE_FORMAT_JPEG = 'jpeg' IMAGE_FORMAT_PNG = 'png' IMAGE_FORMAT_GIF = 'gif' IMAGE_FORMAT_SVG = 'svg' # An array containing the accepted image formats (as determined by the imghdr # module) and the corresponding allowed extensions in the filenames of uploaded # images. ACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS = { IMAGE_FORMAT_JPEG: ['jpg', 'jpeg'], IMAGE_FORMAT_PNG: ['png'], IMAGE_FORMAT_GIF: ['gif'], IMAGE_FORMAT_SVG: ['svg'] } # An array containing the image formats that can be compressed. COMPRESSIBLE_IMAGE_FORMATS = [IMAGE_FORMAT_JPEG, IMAGE_FORMAT_PNG] # An array containing the accepted audio extensions for uploaded files and # the corresponding MIME types. ACCEPTED_AUDIO_EXTENSIONS = { 'mp3': ['audio/mp3'] } # Prefix for data sent from the server to the client via JSON. XSSI_PREFIX = ')]}\'\n' # A regular expression for alphanumeric characters. ALPHANUMERIC_REGEX = r'^[A-Za-z0-9]+$' # These are here rather than in rating_services.py to avoid import # circularities with exp_services. # TODO(Jacob): Refactor exp_services to remove this problem. _EMPTY_RATINGS = {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0} def get_empty_ratings(): # type: () -> Dict[Text, int] """Returns a copy of the empty ratings object. Returns: dict. Copy of the '_EMPTY_RATINGS' dict object which contains the empty ratings. """ return copy.deepcopy(_EMPTY_RATINGS) # To use mailchimp email service. BULK_EMAIL_SERVICE_PROVIDER_MAILCHIMP = 'mailchimp_email_service' # Use GAE email service by default. BULK_EMAIL_SERVICE_PROVIDER = BULK_EMAIL_SERVICE_PROVIDER_MAILCHIMP # Empty scaled average rating as a float. EMPTY_SCALED_AVERAGE_RATING = 0.0 # To use mailgun email service. EMAIL_SERVICE_PROVIDER_MAILGUN = 'mailgun_email_service' # Use GAE email service by default. EMAIL_SERVICE_PROVIDER = EMAIL_SERVICE_PROVIDER_MAILGUN # If the Mailgun email API is used, the "None" below should be replaced # with the Mailgun API key. 
MAILGUN_API_KEY = None # If the Mailgun email API is used, the "None" below should be replaced # with the Mailgun domain name (ending with mailgun.org). MAILGUN_DOMAIN_NAME = None # Audience ID of the mailing list for Oppia in Mailchimp. MAILCHIMP_AUDIENCE_ID = None # Mailchimp API Key. MAILCHIMP_API_KEY = None # Mailchimp username. MAILCHIMP_USERNAME = None # Mailchimp secret, used to authenticate webhook requests. MAILCHIMP_WEBHOOK_SECRET = None ES_LOCALHOST_PORT = 9200 # NOTE TO RELEASE COORDINATORS: Replace this with the correct ElasticSearch # auth information during deployment. ES_CLOUD_ID = None ES_USERNAME = None ES_PASSWORD = None # NOTE TO RELEASE COORDINATORS: Replace this with the correct Redis Host and # Port when switching to prod server. Keep this in sync with redis.conf in the # root folder. Specifically, REDISPORT should always be the same as the port in # redis.conf. REDISHOST = 'localhost' REDISPORT = 6379 # NOTE TO RELEASE COORDINATORS: Replace this project id with the correct oppia # project id when switching to the prod server. OPPIA_PROJECT_ID = 'dev-project-id' GOOGLE_APP_ENGINE_REGION = 'us-central1' # Committer id for system actions. The username for the system committer # (i.e. admin) is also 'admin'. SYSTEM_COMMITTER_ID = 'admin' # Domain name for email address. INCOMING_EMAILS_DOMAIN_NAME = 'example.com' SYSTEM_EMAIL_ADDRESS = '[email protected]' SYSTEM_EMAIL_NAME = '.' ADMIN_EMAIL_ADDRESS = '[email protected]' NOREPLY_EMAIL_ADDRESS = '[email protected]' # Ensure that SYSTEM_EMAIL_ADDRESS and ADMIN_EMAIL_ADDRESS are both valid and # correspond to owners of the app before setting this to True. If # SYSTEM_EMAIL_ADDRESS is not that of an app owner, email messages from this # address cannot be sent. If True then emails can be sent to any user. CAN_SEND_EMAILS = False # If you want to turn on this facility please check the email templates in the # send_role_notification_email() function in email_manager.py and modify them # accordingly. CAN_SEND_EDITOR_ROLE_EMAILS = False # If enabled then emails will be sent to creators for feedback messages. CAN_SEND_FEEDBACK_MESSAGE_EMAILS = False # If enabled subscription emails will be sent to that user. CAN_SEND_SUBSCRIPTION_EMAILS = False # Time to wait before sending feedback message emails (currently set to 1 # hour). DEFAULT_FEEDBACK_MESSAGE_EMAIL_COUNTDOWN_SECS = 3600 # Whether to send an email when new feedback message is received for # an exploration. DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE = True # Whether to send an email to all the creator's subscribers when he/she # publishes an exploration. DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE = True # Whether exploration feedback emails are muted, # when the user has not specified a preference. DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE = False # Whether exploration suggestion emails are muted, # when the user has not specified a preference. DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE = False # Whether to send email updates to a user who has not specified a preference. DEFAULT_EMAIL_UPDATES_PREFERENCE = False # Whether to send an invitation email when the user is granted # new role permissions in an exploration. DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE = True # Whether to require an email to be sent, following a moderator action. REQUIRE_EMAIL_ON_MODERATOR_ACTION = False # Timespan in minutes before allowing duplicate emails. DUPLICATE_EMAIL_INTERVAL_MINS = 2 # Number of digits after decimal to which the average ratings value in the # dashboard is rounded off to. 
AVERAGE_RATINGS_DASHBOARD_PRECISION = 2 # Whether to enable maintenance mode on the site. For non-admins, this redirects # all HTTP requests to the maintenance page. This is the only check which # determines whether the site is in maintenance mode to avoid queries to the # database by non-admins. ENABLE_MAINTENANCE_MODE = False # The interactions permissible for a question. ALLOWED_QUESTION_INTERACTION_IDS = [ 'TextInput', 'MultipleChoiceInput', 'NumericInput'] # Flag to disable sending emails related to reviews for suggestions. To be # flipped after deciding (and implementing) whether a user should be scored # only for curated lessons. SEND_SUGGESTION_REVIEW_RELATED_EMAILS = False # To prevent recording scores for users until details like whether to score # users for only curated lessons is confirmed. ENABLE_RECORDING_OF_SCORES = False # No. of pretest questions to display. NUM_PRETEST_QUESTIONS = 0 EMAIL_INTENT_SIGNUP = 'signup' EMAIL_INTENT_DAILY_BATCH = 'daily_batch' EMAIL_INTENT_EDITOR_ROLE_NOTIFICATION = 'editor_role_notification' EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION = 'feedback_message_notification' EMAIL_INTENT_SUBSCRIPTION_NOTIFICATION = 'subscription_notification' EMAIL_INTENT_SUGGESTION_NOTIFICATION = 'suggestion_notification' EMAIL_INTENT_REPORT_BAD_CONTENT = 'report_bad_content' EMAIL_INTENT_MARKETING = 'marketing' EMAIL_INTENT_UNPUBLISH_EXPLORATION = 'unpublish_exploration' EMAIL_INTENT_DELETE_EXPLORATION = 'delete_exploration' EMAIL_INTENT_QUERY_STATUS_NOTIFICATION = 'query_status_notification' EMAIL_INTENT_ONBOARD_REVIEWER = 'onboard_reviewer' EMAIL_INTENT_REMOVE_REVIEWER = 'remove_reviewer' EMAIL_INTENT_ADDRESS_CONTRIBUTOR_DASHBOARD_SUGGESTIONS = ( 'address_contributor_dashboard_suggestions' ) EMAIL_INTENT_REVIEW_CREATOR_DASHBOARD_SUGGESTIONS = ( 'review_creator_dashboard_suggestions') EMAIL_INTENT_REVIEW_CONTRIBUTOR_DASHBOARD_SUGGESTIONS = ( 'review_contributor_dashboard_suggestions' ) EMAIL_INTENT_ADD_CONTRIBUTOR_DASHBOARD_REVIEWERS = ( 'add_contributor_dashboard_reviewers' ) EMAIL_INTENT_VOICEOVER_APPLICATION_UPDATES = 'voiceover_application_updates' EMAIL_INTENT_ACCOUNT_DELETED = 'account_deleted' # Possible intents for email sent in bulk. BULK_EMAIL_INTENT_MARKETING = 'bulk_email_marketing' BULK_EMAIL_INTENT_IMPROVE_EXPLORATION = 'bulk_email_improve_exploration' BULK_EMAIL_INTENT_CREATE_EXPLORATION = 'bulk_email_create_exploration' BULK_EMAIL_INTENT_CREATOR_REENGAGEMENT = 'bulk_email_creator_reengagement' BULK_EMAIL_INTENT_LEARNER_REENGAGEMENT = 'bulk_email_learner_reengagement' BULK_EMAIL_INTENT_ML_JOB_FAILURE = 'bulk_email_ml_job_failure' BULK_EMAIL_INTENT_TEST = 'bulk_email_test' MESSAGE_TYPE_FEEDBACK = 'feedback' MESSAGE_TYPE_SUGGESTION = 'suggestion' MODERATOR_ACTION_UNPUBLISH_EXPLORATION = 'unpublish_exploration' DEFAULT_SALUTATION_HTML_FN = ( lambda recipient_username: 'Hi %s,' % recipient_username) DEFAULT_SIGNOFF_HTML_FN = ( lambda sender_username: ( 'Thanks!<br>%s (Oppia moderator)' % sender_username)) VALID_MODERATOR_ACTIONS = { MODERATOR_ACTION_UNPUBLISH_EXPLORATION: { 'email_config': 'unpublish_exploration_email_html_body', 'email_subject_fn': ( lambda exp_title: ( 'Your Oppia exploration "%s" has been unpublished' % exp_title) ), 'email_intent': 'unpublish_exploration', 'email_salutation_html_fn': DEFAULT_SALUTATION_HTML_FN, 'email_signoff_html_fn': DEFAULT_SIGNOFF_HTML_FN, }, } # When the site terms were last updated, in UTC. 
REGISTRATION_PAGE_LAST_UPDATED_UTC = datetime.datetime(2015, 10, 14, 2, 40, 0) # Format of string for dashboard statistics logs. # NOTE TO DEVELOPERS: This format should not be changed, since it is used in # the existing storage models for UserStatsModel. DASHBOARD_STATS_DATETIME_STRING_FORMAT = '%Y-%m-%d' # We generate images for existing math rich text components in batches. This # gives the maximum size for a batch of Math SVGs in bytes. MAX_SIZE_OF_MATH_SVGS_BATCH_BYTES = 31 * 1024 * 1024 # The maximum size of an uploaded file, in bytes. MAX_FILE_SIZE_BYTES = 1048576 # The maximum playback length of an audio file, in seconds. MAX_AUDIO_FILE_LENGTH_SEC = 300 # The maximum number of questions to be fetched at one time. MAX_QUESTIONS_FETCHABLE_AT_ONE_TIME = 20 # The minimum score required for a user to review suggestions of a particular # category. MINIMUM_SCORE_REQUIRED_TO_REVIEW = 10 # The maximum number of skills to be requested at one time when fetching # questions. MAX_NUMBER_OF_SKILL_IDS = 20 # The maximum number of blog post cards to be visible on each page in blog # homepage. MAX_NUM_CARDS_TO_DISPLAY_ON_BLOG_HOMEPAGE = 10 # The maximum number of blog post cards to be visible on each page in author # specific blog post page. MAX_NUM_CARDS_TO_DISPLAY_ON_AUTHOR_SPECIFIC_BLOG_POST_PAGE = 12 # The maximum number of blog post cards to be visible as suggestions on the # blog post page. MAX_POSTS_TO_RECOMMEND_AT_END_OF_BLOG_POST = 2 # The prefix for an 'accepted suggestion' commit message. COMMIT_MESSAGE_ACCEPTED_SUGGESTION_PREFIX = 'Accepted suggestion by' # User id and username for exploration migration bot. Commits made by this bot # are not reflected in the exploration summary models, but are recorded in the # exploration commit log. MIGRATION_BOT_USER_ID = 'OppiaMigrationBot' MIGRATION_BOT_USERNAME = 'OppiaMigrationBot' # User id and username for suggestion bot. This bot will be used to accept # suggestions automatically after a threshold time. SUGGESTION_BOT_USER_ID = 'OppiaSuggestionBot' SUGGESTION_BOT_USERNAME = 'OppiaSuggestionBot' # The system usernames are reserved usernames. Before adding new value to this # dict, make sure that there aren't any similar usernames in the datastore. # Note: All bot user IDs and usernames should start with "Oppia" and end with # "Bot". SYSTEM_USERS = { SYSTEM_COMMITTER_ID: SYSTEM_COMMITTER_ID, MIGRATION_BOT_USER_ID: MIGRATION_BOT_USERNAME, SUGGESTION_BOT_USER_ID: SUGGESTION_BOT_USERNAME } # Ids and locations of the permitted extensions. ALLOWED_RTE_EXTENSIONS = { 'Collapsible': { 'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Collapsible') }, 'Image': { 'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Image') }, 'Link': { 'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Link') }, 'Math': { 'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Math') }, 'Svgdiagram': { 'dir': os.path.join(RTE_EXTENSIONS_DIR, 'svgdiagram') }, 'Tabs': { 'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Tabs') }, 'Video': { 'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Video') }, } # The list of interaction IDs which correspond to interactions that set their # is_linear property to true. Linear interactions do not support branching and # thus only allow for default answer classification. This value is guarded by a # test in extensions.interactions.base_test. LINEAR_INTERACTION_IDS = ['Continue'] # Demo explorations to load through the admin panel. The id assigned to each # exploration is based on the key of the exploration in this dict, so ensure it # doesn't change once it's in the list. 
Only integer-based indices should be # used in this list, as it maintains backward compatibility with how demo # explorations used to be assigned IDs. The value of each entry in this dict is # either a YAML file or a directory (depending on whether it ends in .yaml). # These explorations can be found under data/explorations. DEMO_EXPLORATIONS = { u'0': 'welcome', u'1': 'multiples.yaml', u'2': 'binary_search', u'3': 'root_linear_coefficient_theorem', u'4': 'three_balls', # TODO(bhenning): Replace demo exploration '5' with a new exploration # described in #1376. u'6': 'boot_verbs.yaml', u'7': 'hola.yaml', u'8': 'adventure.yaml', u'9': 'pitch_perfect.yaml', u'10': 'test_interactions', u'11': 'modeling_graphs', u'12': 'protractor_test_1.yaml', u'13': 'solar_system', u'14': 'about_oppia.yaml', u'15': 'classifier_demo_exploration.yaml', u'16': 'all_interactions', u'17': 'audio_test', # Exploration with ID 18 was used for testing CodeClassifier functionality # which has been removed (#10060). u'19': 'example_exploration_in_collection1.yaml', u'20': 'example_exploration_in_collection2.yaml', u'21': 'example_exploration_in_collection3.yaml', u'22': 'protractor_mobile_test_exploration.yaml', u'23': 'rating_test.yaml', u'24': 'learner_flow_test.yaml', u'25': 'exploration_player_test.yaml', u'26': 'android_interactions', } DEMO_COLLECTIONS = { u'0': 'welcome_to_collections.yaml', u'1': 'learner_flow_test_collection.yaml' } # IDs of explorations which should not be displayable in either the learner or # editor views. DISABLED_EXPLORATION_IDS = ['5'] # Oppia Google Group URL. GOOGLE_GROUP_URL = ( 'https://groups.google.com/forum/?place=forum/oppia#!forum/oppia') # External URL for the Foundation site. FOUNDATION_SITE_URL = 'http://oppiafoundation.org' # NOTE TO RELEASE COORDINATORS: External URL for the oppia production site. # Change to the correct url for internal testing in the testing production # environment. # Change to the production URL when deploying to production site. OPPIA_SITE_URL = 'http://localhost:8181' # Prefix for all taskqueue-related URLs. TASKQUEUE_URL_PREFIX = '/task' TASK_URL_FEEDBACK_MESSAGE_EMAILS = ( '%s/email/batchfeedbackmessageemailhandler' % TASKQUEUE_URL_PREFIX) TASK_URL_FEEDBACK_STATUS_EMAILS = ( '%s/email/feedbackthreadstatuschangeemailhandler' % TASKQUEUE_URL_PREFIX) TASK_URL_FLAG_EXPLORATION_EMAILS = ( '%s/email/flagexplorationemailhandler' % TASKQUEUE_URL_PREFIX) TASK_URL_INSTANT_FEEDBACK_EMAILS = ( '%s/email/instantfeedbackmessageemailhandler' % TASKQUEUE_URL_PREFIX) TASK_URL_SUGGESTION_EMAILS = ( '%s/email/suggestionemailhandler' % TASKQUEUE_URL_PREFIX) TASK_URL_DEFERRED = ( '%s/deferredtaskshandler' % TASKQUEUE_URL_PREFIX) # TODO(sll): Add all other URLs here. 
ADMIN_URL = '/admin' ADMIN_ROLE_HANDLER_URL = '/adminrolehandler' BLOG_ADMIN_PAGE_URL = '/blog-admin' BLOG_ADMIN_ROLE_HANDLER_URL = '/blogadminrolehandler' BLOG_DASHBOARD_DATA_URL = '/blogdashboardhandler/data' BLOG_DASHBOARD_URL = '/blog-dashboard' BLOG_EDITOR_DATA_URL_PREFIX = '/blogeditorhandler/data' BULK_EMAIL_WEBHOOK_ENDPOINT = '/bulk_email_webhook_endpoint' BLOG_HOMEPAGE_DATA_URL = '/blogdatahandler/data' BLOG_HOMEPAGE_URL = '/blog' AUTHOR_SPECIFIC_BLOG_POST_PAGE_URL_PREFIX = '/blog/author' CLASSROOM_DATA_HANDLER = '/classroom_data_handler' COLLECTION_DATA_URL_PREFIX = '/collection_handler/data' COLLECTION_EDITOR_DATA_URL_PREFIX = '/collection_editor_handler/data' COLLECTION_SUMMARIES_DATA_URL = '/collectionsummarieshandler/data' COLLECTION_RIGHTS_PREFIX = '/collection_editor_handler/rights' COLLECTION_PUBLISH_PREFIX = '/collection_editor_handler/publish' COLLECTION_UNPUBLISH_PREFIX = '/collection_editor_handler/unpublish' COLLECTION_EDITOR_URL_PREFIX = '/collection_editor/create' COLLECTION_URL_PREFIX = '/collection' CONCEPT_CARD_DATA_URL_PREFIX = '/concept_card_handler' CONTRIBUTOR_DASHBOARD_URL = '/contributor-dashboard' CONTRIBUTOR_DASHBOARD_ADMIN_URL = '/contributor-dashboard-admin' CONTRIBUTOR_OPPORTUNITIES_DATA_URL = '/opportunitiessummaryhandler' CREATOR_DASHBOARD_DATA_URL = '/creatordashboardhandler/data' CREATOR_DASHBOARD_URL = '/creator-dashboard' CSRF_HANDLER_URL = '/csrfhandler' CUSTOM_NONPROFITS_LANDING_PAGE_URL = '/nonprofits' CUSTOM_PARENTS_LANDING_PAGE_URL = '/parents' CUSTOM_PARTNERS_LANDING_PAGE_URL = '/partners' CUSTOM_TEACHERS_LANDING_PAGE_URL = '/teachers' CUSTOM_VOLUNTEERS_LANDING_PAGE_URL = '/volunteers' DASHBOARD_CREATE_MODE_URL = '%s?mode=create' % CREATOR_DASHBOARD_URL EDITOR_URL_PREFIX = '/create' EXPLORATION_DATA_PREFIX = '/createhandler/data' EXPLORATION_FEATURES_PREFIX = '/explorehandler/features' EXPLORATION_INIT_URL_PREFIX = '/explorehandler/init' EXPLORATION_LEARNER_ANSWER_DETAILS = ( '/learneranswerinfohandler/learner_answer_details') EXPLORATION_METADATA_SEARCH_URL = '/exploration/metadata_search' EXPLORATION_PRETESTS_URL_PREFIX = '/pretest_handler' EXPLORATION_RIGHTS_PREFIX = '/createhandler/rights' EXPLORATION_STATE_ANSWER_STATS_PREFIX = '/createhandler/state_answer_stats' EXPLORATION_STATUS_PREFIX = '/createhandler/status' EXPLORATION_SUMMARIES_DATA_URL = '/explorationsummarieshandler/data' EXPLORATION_URL_PREFIX = '/explore' EXPLORATION_URL_EMBED_PREFIX = '/embed/exploration' FEEDBACK_STATS_URL_PREFIX = '/feedbackstatshandler' FEEDBACK_THREAD_URL_PREFIX = '/threadhandler' FEEDBACK_THREADLIST_URL_PREFIX = '/threadlisthandler' FEEDBACK_THREADLIST_URL_PREFIX_FOR_TOPICS = '/threadlisthandlerfortopic' FEEDBACK_THREAD_VIEW_EVENT_URL = '/feedbackhandler/thread_view_event' FETCH_SKILLS_URL_PREFIX = '/fetch_skills' FLAG_EXPLORATION_URL_PREFIX = '/flagexplorationhandler' FRACTIONS_LANDING_PAGE_URL = '/fractions' IMPROVEMENTS_URL_PREFIX = '/improvements' IMPROVEMENTS_HISTORY_URL_PREFIX = '/improvements/history' IMPROVEMENTS_CONFIG_URL_PREFIX = '/improvements/config' LEARNER_ANSWER_INFO_HANDLER_URL = ( '/learneranswerinfohandler/learner_answer_details') LEARNER_ANSWER_DETAILS_SUBMIT_URL = '/learneranswerdetailshandler' LEARNER_DASHBOARD_URL = '/learner-dashboard' LEARNER_DASHBOARD_DATA_URL = '/learnerdashboardhandler/data' LEARNER_DASHBOARD_IDS_DATA_URL = '/learnerdashboardidshandler/data' LEARNER_DASHBOARD_FEEDBACK_THREAD_DATA_URL = '/learnerdashboardthreadhandler' LEARNER_GOALS_DATA_URL = '/learnergoalshandler' LEARNER_PLAYLIST_DATA_URL = 
'/learnerplaylistactivityhandler' LEARNER_INCOMPLETE_ACTIVITY_DATA_URL = '/learnerincompleteactivityhandler' LIBRARY_GROUP_DATA_URL = '/librarygrouphandler' LIBRARY_INDEX_URL = '/community-library' LIBRARY_INDEX_DATA_URL = '/libraryindexhandler' LIBRARY_RECENTLY_PUBLISHED_URL = '/community-library/recently-published' LIBRARY_SEARCH_URL = '/search/find' LIBRARY_SEARCH_DATA_URL = '/searchhandler/data' LIBRARY_TOP_RATED_URL = '/community-library/top-rated' MACHINE_TRANSLATION_DATA_URL = '/machine_translated_state_texts_handler' MERGE_SKILLS_URL = '/merge_skills_handler' NEW_COLLECTION_URL = '/collection_editor_handler/create_new' NEW_EXPLORATION_URL = '/contributehandler/create_new' NEW_QUESTION_URL = '/question_editor_handler/create_new' NEW_SKILL_URL = '/skill_editor_handler/create_new' TOPIC_EDITOR_STORY_URL = '/topic_editor_story_handler' TOPIC_EDITOR_QUESTION_URL = '/topic_editor_question_handler' NEW_TOPIC_URL = '/topic_editor_handler/create_new' PREFERENCES_URL = '/preferences' PRACTICE_SESSION_URL_PREFIX = '/practice_session' PRACTICE_SESSION_DATA_URL_PREFIX = '/practice_session/data' PREFERENCES_DATA_URL = '/preferenceshandler/data' QUESTION_EDITOR_DATA_URL_PREFIX = '/question_editor_handler/data' QUESTION_SKILL_LINK_URL_PREFIX = '/manage_question_skill_link' QUESTIONS_LIST_URL_PREFIX = '/questions_list_handler' QUESTION_COUNT_URL_PREFIX = '/question_count_handler' QUESTIONS_URL_PREFIX = '/question_player_handler' RECENT_COMMITS_DATA_URL = '/recentcommitshandler/recent_commits' RECENT_FEEDBACK_MESSAGES_DATA_URL = '/recent_feedback_messages' DELETE_ACCOUNT_URL = '/delete-account' DELETE_ACCOUNT_HANDLER_URL = '/delete-account-handler' EXPORT_ACCOUNT_HANDLER_URL = '/export-account-handler' PENDING_ACCOUNT_DELETION_URL = '/pending-account-deletion' REVIEW_TEST_DATA_URL_PREFIX = '/review_test_handler/data' REVIEW_TEST_URL_PREFIX = '/review_test' ROBOTS_TXT_URL = '/robots.txt' SITE_LANGUAGE_DATA_URL = '/save_site_language' SIGNUP_DATA_URL = '/signuphandler/data' SIGNUP_URL = '/signup' SKILL_DASHBOARD_DATA_URL = '/skills_dashboard/data' SKILL_DATA_URL_PREFIX = '/skill_data_handler' SKILL_EDITOR_DATA_URL_PREFIX = '/skill_editor_handler/data' SKILL_EDITOR_URL_PREFIX = '/skill_editor' SKILL_EDITOR_QUESTION_URL = '/skill_editor_question_handler' SKILL_MASTERY_DATA_URL = '/skill_mastery_handler/data' SKILL_RIGHTS_URL_PREFIX = '/skill_editor_handler/rights' SKILL_DESCRIPTION_HANDLER = '/skill_description_handler' STORY_DATA_HANDLER = '/story_data_handler' STORY_EDITOR_URL_PREFIX = '/story_editor' STORY_EDITOR_DATA_URL_PREFIX = '/story_editor_handler/data' STORY_PROGRESS_URL_PREFIX = '/story_progress_handler' STORY_PUBLISH_HANDLER = '/story_publish_handler' STORY_URL_FRAGMENT_HANDLER = '/story_url_fragment_handler' STORY_VIEWER_URL_PREFIX = '/story' SUBTOPIC_DATA_HANDLER = '/subtopic_data_handler' SUBTOPIC_VIEWER_URL_PREFIX = '/subtopic' SUGGESTION_ACTION_URL_PREFIX = '/suggestionactionhandler' SUGGESTION_LIST_URL_PREFIX = '/suggestionlisthandler' SUGGESTION_URL_PREFIX = '/suggestionhandler' UPDATE_TRANSLATION_SUGGESTION_URL_PREFIX = ( '/updatetranslationsuggestionhandler') UPDATE_QUESTION_SUGGESTION_URL_PREFIX = ( '/updatequestionsuggestionhandler') SUBSCRIBE_URL_PREFIX = '/subscribehandler' SUBTOPIC_PAGE_EDITOR_DATA_URL_PREFIX = '/subtopic_page_editor_handler/data' TOPIC_VIEWER_URL_PREFIX = ( '/learn/<classroom_url_fragment>/<topic_url_fragment>') TOPIC_DATA_HANDLER = '/topic_data_handler' TOPIC_EDITOR_DATA_URL_PREFIX = '/topic_editor_handler/data' TOPIC_EDITOR_URL_PREFIX = '/topic_editor' 
TOPIC_NAME_HANDLER = '/topic_name_handler' TOPIC_RIGHTS_URL_PREFIX = '/rightshandler/get_topic_rights' TOPIC_SEND_MAIL_URL_PREFIX = '/rightshandler/send_topic_publish_mail' TOPIC_STATUS_URL_PREFIX = '/rightshandler/change_topic_status' TOPIC_URL_FRAGMENT_HANDLER = '/topic_url_fragment_handler' TOPICS_AND_SKILLS_DASHBOARD_DATA_URL = '/topics_and_skills_dashboard/data' UNASSIGN_SKILL_DATA_HANDLER_URL = '/topics_and_skills_dashboard/unassign_skill' TOPICS_AND_SKILLS_DASHBOARD_URL = '/topics-and-skills-dashboard' UNSUBSCRIBE_URL_PREFIX = '/unsubscribehandler' UPLOAD_EXPLORATION_URL = '/contributehandler/upload' USER_EXPLORATION_EMAILS_PREFIX = '/createhandler/notificationpreferences' USER_PERMISSIONS_URL_PREFIX = '/createhandler/permissions' USERNAME_CHECK_DATA_URL = '/usernamehandler/data' VALIDATE_STORY_EXPLORATIONS_URL_PREFIX = '/validate_story_explorations' # Event types. EVENT_TYPE_ALL_STATS = 'all_stats' EVENT_TYPE_STATE_HIT = 'state_hit' EVENT_TYPE_STATE_COMPLETED = 'state_complete' EVENT_TYPE_ANSWER_SUBMITTED = 'answer_submitted' EVENT_TYPE_DEFAULT_ANSWER_RESOLVED = 'default_answer_resolved' EVENT_TYPE_NEW_THREAD_CREATED = 'feedback_thread_created' EVENT_TYPE_THREAD_STATUS_CHANGED = 'feedback_thread_status_changed' EVENT_TYPE_RATE_EXPLORATION = 'rate_exploration' EVENT_TYPE_SOLUTION_HIT = 'solution_hit' EVENT_TYPE_LEAVE_FOR_REFRESHER_EXP = 'leave_for_refresher_exp' # The values for these event types should be left as-is for backwards # compatibility. EVENT_TYPE_START_EXPLORATION = 'start' EVENT_TYPE_ACTUAL_START_EXPLORATION = 'actual_start' EVENT_TYPE_MAYBE_LEAVE_EXPLORATION = 'leave' EVENT_TYPE_COMPLETE_EXPLORATION = 'complete' # Play type constants. PLAY_TYPE_PLAYTEST = 'playtest' PLAY_TYPE_NORMAL = 'normal' # Predefined commit messages. COMMIT_MESSAGE_EXPLORATION_DELETED = 'Exploration deleted.' COMMIT_MESSAGE_COLLECTION_DELETED = 'Collection deleted.' COMMIT_MESSAGE_QUESTION_DELETED = 'Question deleted.' COMMIT_MESSAGE_SKILL_DELETED = 'Skill deleted.' COMMIT_MESSAGE_STORY_DELETED = 'Story deleted.' COMMIT_MESSAGE_SUBTOPIC_PAGE_DELETED = 'Subtopic page deleted.' COMMIT_MESSAGE_TOPIC_DELETED = 'Topic deleted.' # Max number of playthroughs for an issue. MAX_PLAYTHROUGHS_FOR_ISSUE = 5 # Number of unresolved answers to be displayed in the dashboard for each # exploration. TOP_UNRESOLVED_ANSWERS_COUNT_DASHBOARD = 3 # Number of open feedback to be displayed in the dashboard for each exploration. OPEN_FEEDBACK_COUNT_DASHBOARD = 3 # NOTE TO DEVELOPERS: This should be synchronized with app.constants.ts. ENABLE_ML_CLASSIFIERS = False # The regular expression used to identify whether a string contains float value. # The regex must match with regex that is stored in vmconf.py file of Oppia-ml. # If this regex needs to be modified then first of all shutdown Oppia-ml VM. # Then update the regex constant in here and Oppia both. # Run any migration job that is required to migrate existing trained models # before starting Oppia-ml again. FLOAT_VERIFIER_REGEX = ( '^([-+]?\\d*\\.\\d+)$|^([-+]?(\\d*\\.?\\d+|\\d+\\.?\\d*)e[-+]?\\d*)$') # Current event models schema version. All event models with an # event_schema_version of 1 are the events collected before the rework of the # statistics framework which brought about the recording of new event models; # these models include all models recorded before Feb 2018. CURRENT_EVENT_MODELS_SCHEMA_VERSION = 2 # Output formats of downloaded explorations. 
OUTPUT_FORMAT_JSON = 'json' OUTPUT_FORMAT_ZIP = 'zip' # Types of updates shown in the 'recent updates' table in the dashboard page. UPDATE_TYPE_EXPLORATION_COMMIT = 'exploration_commit' UPDATE_TYPE_COLLECTION_COMMIT = 'collection_commit' UPDATE_TYPE_FEEDBACK_MESSAGE = 'feedback_thread' # Possible values for user query status. # Valid status transitions are: processing --> completed --> archived # or processing --> failed. USER_QUERY_STATUS_PROCESSING = 'processing' USER_QUERY_STATUS_COMPLETED = 'completed' USER_QUERY_STATUS_ARCHIVED = 'archived' USER_QUERY_STATUS_FAILED = 'failed' ALLOWED_USER_QUERY_STATUSES = ( USER_QUERY_STATUS_PROCESSING, USER_QUERY_STATUS_COMPLETED, USER_QUERY_STATUS_ARCHIVED, USER_QUERY_STATUS_FAILED ) # The time difference between which to consider two login events "close". This # is taken to be 12 hours. PROXIMAL_TIMEDELTA_SECS = 12 * 60 * 60 # The i18n id for the header of the "Featured Activities" category in the # library index page. LIBRARY_CATEGORY_FEATURED_ACTIVITIES = 'I18N_LIBRARY_GROUPS_FEATURED_ACTIVITIES' # The i18n id for the header of the "Top Rated Explorations" category in the # library index page. LIBRARY_CATEGORY_TOP_RATED_EXPLORATIONS = ( 'I18N_LIBRARY_GROUPS_TOP_RATED_EXPLORATIONS') # The i18n id for the header of the "Recently Published" category in the # library index page. LIBRARY_CATEGORY_RECENTLY_PUBLISHED = 'I18N_LIBRARY_GROUPS_RECENTLY_PUBLISHED' # The group name that appears at the end of the url for the recently published # page. LIBRARY_GROUP_RECENTLY_PUBLISHED = 'recently-published' # The group name that appears at the end of the url for the top rated page. LIBRARY_GROUP_TOP_RATED = 'top-rated' # Defaults for topic similarities. DEFAULT_TOPIC_SIMILARITY = 0.5 SAME_TOPIC_SIMILARITY = 1.0 # The type of the response returned by a handler when an exception is raised. HANDLER_TYPE_HTML = 'html' HANDLER_TYPE_JSON = 'json' HANDLER_TYPE_DOWNLOADABLE = 'downloadable' # Following are the constants for the role IDs. ROLE_ID_ADMIN = 'ADMIN' ROLE_ID_BANNED_USER = 'BANNED_USER' ROLE_ID_BLOG_ADMIN = 'BLOG_ADMIN' ROLE_ID_BLOG_POST_EDITOR = 'BLOG_POST_EDITOR' ROLE_ID_COLLECTION_EDITOR = 'COLLECTION_EDITOR' ROLE_ID_EXPLORATION_EDITOR = 'EXPLORATION_EDITOR' ROLE_ID_GUEST = 'GUEST' ROLE_ID_LEARNER = 'LEARNER' ROLE_ID_MODERATOR = 'MODERATOR' ROLE_ID_QUESTION_ADMIN = 'QUESTION_ADMIN' ROLE_ID_RELEASE_COORDINATOR = 'RELEASE_COORDINATOR' ROLE_ID_TOPIC_MANAGER = 'TOPIC_MANAGER' ROLE_ID_TRANSLATION_ADMIN = 'TRANSLATION_ADMIN' ROLE_ID_VOICEOVER_ADMIN = 'VOICEOVER_ADMIN' ALLOWED_USER_ROLES = [ ROLE_ID_ADMIN, ROLE_ID_BANNED_USER, ROLE_ID_BLOG_ADMIN, ROLE_ID_BLOG_POST_EDITOR, ROLE_ID_COLLECTION_EDITOR, ROLE_ID_EXPLORATION_EDITOR, ROLE_ID_GUEST, ROLE_ID_LEARNER, ROLE_ID_MODERATOR, ROLE_ID_QUESTION_ADMIN, ROLE_ID_RELEASE_COORDINATOR, ROLE_ID_TOPIC_MANAGER, ROLE_ID_TRANSLATION_ADMIN, ROLE_ID_VOICEOVER_ADMIN ] # Intent of the User making query to role structure via admin interface. Used # to store audit data regarding queries to role IDs. ROLE_ACTION_UPDATE = 'update' ROLE_ACTION_VIEW_BY_USERNAME = 'view_by_username' ROLE_ACTION_VIEW_BY_ROLE = 'view_by_role' USER_FILTER_CRITERION_ROLE = 'role' USER_FILTER_CRITERION_USERNAME = 'username' QUESTION_BATCH_SIZE = 10 STATE_ANSWER_STATS_MIN_FREQUENCY = 2 RTE_FORMAT_TEXTANGULAR = 'text-angular' RTE_FORMAT_CKEDITOR = 'ck-editor' # RTE content specifications according to the type of the editor. RTE_CONTENT_SPEC = { 'RTE_TYPE_TEXTANGULAR': { # Valid parent-child relation in TextAngular. 
'ALLOWED_PARENT_LIST': { 'p': ['blockquote', 'div', 'pre', '[document]', 'ol', 'ul', 'li'], 'b': ['i', 'li', 'p', 'pre'], 'br': ['b', 'i', 'li', 'p'], 'i': ['b', 'li', 'p', 'pre'], 'li': ['ol', 'ul'], 'ol': ['ol', 'ul', 'blockquote', 'li', 'pre', 'div', '[document]'], 'ul': ['ol', 'ul', 'blockquote', 'li', 'pre', 'div', '[document]'], 'pre': ['ol', 'ul', 'blockquote', '[document]'], 'blockquote': ['blockquote', '[document]'], 'oppia-noninteractive-link': ['b', 'i', 'li', 'p', 'pre'], 'oppia-noninteractive-math': ['b', 'i', 'li', 'p', 'pre'], 'oppia-noninteractive-image': ['b', 'i', 'li', 'p', 'pre'], 'oppia-noninteractive-collapsible': ['b', 'i', 'li', 'p', 'pre'], 'oppia-noninteractive-video': ['b', 'i', 'li', 'p', 'pre'], 'oppia-noninteractive-tabs': ['b', 'i', 'li', 'p', 'pre'], 'oppia-noninteractive-svgdiagram': ['b', 'i', 'li', 'p', 'pre'] }, # Valid html tags in TextAngular. 'ALLOWED_TAG_LIST': [ 'p', 'b', 'br', 'i', 'li', 'ol', 'ul', 'pre', 'blockquote', 'oppia-noninteractive-link', 'oppia-noninteractive-math', 'oppia-noninteractive-image', 'oppia-noninteractive-collapsible', 'oppia-noninteractive-video', 'oppia-noninteractive-tabs', 'oppia-noninteractive-svgdiagram' ] }, 'RTE_TYPE_CKEDITOR': { # Valid parent-child relation in CKEditor. 'ALLOWED_PARENT_LIST': { 'p': ['blockquote', '[document]', 'li'], 'strong': ['em', 'li', 'p', 'pre'], 'em': ['strong', 'li', 'p', 'pre'], 'br': ['strong', 'em', 'li', 'p'], 'li': ['ol', 'ul'], 'ol': ['li', 'blockquote', 'pre', '[document]'], 'ul': ['li', 'blockquote', 'pre', '[document]'], 'pre': ['ol', 'ul', 'blockquote', 'li', '[document]'], 'blockquote': ['blockquote', '[document]'], 'oppia-noninteractive-link': ['strong', 'em', 'li', 'p', 'pre'], 'oppia-noninteractive-math': ['strong', 'em', 'li', 'p', 'pre'], 'oppia-noninteractive-image': ['blockquote', 'li', '[document]'], 'oppia-noninteractive-svgdiagram': [ 'blockquote', 'li', '[document]' ], 'oppia-noninteractive-collapsible': [ 'blockquote', 'li', '[document]' ], 'oppia-noninteractive-video': ['blockquote', 'li', '[document]'], 'oppia-noninteractive-tabs': ['blockquote', 'li', '[document]'] }, # Valid html tags in CKEditor. 'ALLOWED_TAG_LIST': [ 'p', 'strong', 'br', 'em', 'li', 'ol', 'ul', 'pre', 'blockquote', 'oppia-noninteractive-link', 'oppia-noninteractive-math', 'oppia-noninteractive-image', 'oppia-noninteractive-collapsible', 'oppia-noninteractive-video', 'oppia-noninteractive-tabs', 'oppia-noninteractive-svgdiagram' ] } } # A dict representing available landing pages, having subject as a key and list # of topics as the value. # Note: This dict needs to be keep in sync with frontend TOPIC_LANDING_PAGE_DATA # oppia constant defined in # core/templates/pages/landing-pages/TopicLandingPage.js file. AVAILABLE_LANDING_PAGES = { 'math': ['fractions', 'negative-numbers', 'ratios'] } # Classroom page names for generating URLs. These need to be kept in sync with # CLASSROOM_PAGES_DATA property in config_domain. CLASSROOM_PAGES = ['math'] # Authentication method using GAE ID (google sign in). GAE_AUTH_PROVIDER_ID = 'gae' # Authentication method using Firebase authentication. Firebase signs its ID # Tokens with iss='Firebase' (iss: issuer, public API refers to this as # "provider id"), so using this naming convention helps us stay consistent with # the status quo. FIREBASE_AUTH_PROVIDER_ID = 'Firebase' # Firebase-specific role specified for users with super admin privileges. 
FIREBASE_ROLE_SUPER_ADMIN = 'super_admin' # Firebase *explicitly* requires IDs to have at most 128 characters, and may # contain any valid ASCII character: # https://firebase.google.com/docs/auth/admin/manage-users#create_a_user # # After manually inspecting ~200 of them, however, we've found that they only # use alpha-numeric characters, hence the tighter restriction. FIREBASE_AUTH_ID_REGEX = '^[A-Za-z0-9]{1,128}$' CLOUD_DATASTORE_EMULATOR_HOST = 'localhost' CLOUD_DATASTORE_EMULATOR_PORT = 8089 FIREBASE_EMULATOR_CONFIG_PATH = '.firebase.json' FIREBASE_EMULATOR_PORT = 9099 # The name of the cookie Oppia will place the session cookie into. The name is # arbitrary. If it is changed later on, then the cookie will live on in the # users' browsers as garbage (although it would expire eventually, see MAX_AGE). FIREBASE_SESSION_COOKIE_NAME = 'session' # The duration a session cookie from Firebase should remain valid for. After the # duration expires, a new cookie will need to be generated. Generating a new # cookie requires the user to sign-in _explicitly_. FIREBASE_SESSION_COOKIE_MAX_AGE = datetime.timedelta(days=14) # TODO(#10501): Once domain objects can be imported by the storage layer, move # these back to appropriate places (rights_domain, topic_domain). # The reserved prefix for keys that are automatically inserted into a # commit_cmd dict by this model. AUTOGENERATED_PREFIX = 'AUTO' # The command string for a revert commit. CMD_REVERT_COMMIT = '%s_revert_version_number' % AUTOGENERATED_PREFIX # The command string for a delete commit. CMD_DELETE_COMMIT = '%s_mark_deleted' % AUTOGENERATED_PREFIX # IMPORTANT: Ensure that all changes to how these cmds are interpreted preserve # backward-compatibility with previous exploration snapshots in the datastore. # Do not modify the definitions of CMD keys that already exist. CMD_CREATE_NEW = 'create_new' CMD_CHANGE_ROLE = 'change_role' CMD_REMOVE_ROLE = 'remove_role' CMD_CHANGE_EXPLORATION_STATUS = 'change_exploration_status' CMD_CHANGE_COLLECTION_STATUS = 'change_collection_status' CMD_CHANGE_PRIVATE_VIEWABILITY = 'change_private_viewability' CMD_RELEASE_OWNERSHIP = 'release_ownership' CMD_UPDATE_FIRST_PUBLISHED_MSEC = 'update_first_published_msec' # Roles used in collections and explorations. ROLE_OWNER = 'owner' ROLE_EDITOR = 'editor' ROLE_VOICE_ARTIST = 'voice artist' ROLE_VIEWER = 'viewer' ROLE_NONE = 'none' # The list of entity types that do not require entity specific access control # when viewing respective suggestions. ENTITY_TYPES_WITH_UNRESTRICTED_VIEW_SUGGESTION_ACCESS = [ENTITY_TYPE_SKILL] # The allowed list of roles which can be used in change_role command. ALLOWED_ACTIVITY_ROLES = [ ROLE_OWNER, ROLE_EDITOR, ROLE_VOICE_ARTIST, ROLE_VIEWER] # The allowed list of status which can be used in change_exploration_status # and change_collection_status commands. ALLOWED_ACTIVITY_STATUS = [ constants.ACTIVITY_STATUS_PRIVATE, constants.ACTIVITY_STATUS_PUBLIC] # Commands allowed in CollectionRightsChange and ExplorationRightsChange. 
COMMON_RIGHTS_ALLOWED_COMMANDS = [{ 'name': CMD_CREATE_NEW, 'required_attribute_names': [], 'optional_attribute_names': [], 'user_id_attribute_names': [] }, { 'name': CMD_CHANGE_ROLE, 'required_attribute_names': ['assignee_id', 'old_role', 'new_role'], 'optional_attribute_names': [], 'user_id_attribute_names': ['assignee_id'], 'allowed_values': { 'new_role': ALLOWED_ACTIVITY_ROLES, 'old_role': ALLOWED_ACTIVITY_ROLES} }, { 'name': CMD_REMOVE_ROLE, 'required_attribute_names': ['removed_user_id', 'old_role'], 'optional_attribute_names': [], 'user_id_attribute_names': ['removed_user_id'], 'allowed_values': {'old_role': ALLOWED_ACTIVITY_ROLES} }, { 'name': CMD_CHANGE_PRIVATE_VIEWABILITY, 'required_attribute_names': [ 'old_viewable_if_private', 'new_viewable_if_private'], 'optional_attribute_names': [], 'user_id_attribute_names': [] }, { 'name': CMD_RELEASE_OWNERSHIP, 'required_attribute_names': [], 'optional_attribute_names': [], 'user_id_attribute_names': [] }, { 'name': CMD_UPDATE_FIRST_PUBLISHED_MSEC, 'required_attribute_names': [ 'old_first_published_msec', 'new_first_published_msec'], 'optional_attribute_names': [], 'user_id_attribute_names': [] }, { 'name': CMD_DELETE_COMMIT, 'required_attribute_names': [], 'optional_attribute_names': [], 'user_id_attribute_names': [] }] COLLECTION_RIGHTS_CHANGE_ALLOWED_COMMANDS = copy.deepcopy( COMMON_RIGHTS_ALLOWED_COMMANDS) COLLECTION_RIGHTS_CHANGE_ALLOWED_COMMANDS.append({ 'name': CMD_CHANGE_COLLECTION_STATUS, 'required_attribute_names': ['old_status', 'new_status'], 'optional_attribute_names': [], 'user_id_attribute_names': [], 'allowed_values': { 'old_status': ALLOWED_ACTIVITY_STATUS, 'new_status': ALLOWED_ACTIVITY_STATUS } }) EXPLORATION_RIGHTS_CHANGE_ALLOWED_COMMANDS = copy.deepcopy( COMMON_RIGHTS_ALLOWED_COMMANDS) EXPLORATION_RIGHTS_CHANGE_ALLOWED_COMMANDS.append({ 'name': CMD_CHANGE_EXPLORATION_STATUS, 'required_attribute_names': ['old_status', 'new_status'], 'optional_attribute_names': [], 'user_id_attribute_names': [], 'allowed_values': { 'old_status': ALLOWED_ACTIVITY_STATUS, 'new_status': ALLOWED_ACTIVITY_STATUS }, # TODO(#12991): Remove this once once we use the migration jobs to remove # the deprecated values from the server data. 'deprecated_values': { 'new_status': ['publicized'] } }) CMD_REMOVE_MANAGER_ROLE = 'remove_manager_role' CMD_PUBLISH_TOPIC = 'publish_topic' CMD_UNPUBLISH_TOPIC = 'unpublish_topic' ROLE_MANAGER = 'manager' # The allowed list of roles which can be used in TopicRightsChange change_role # command. ALLOWED_TOPIC_ROLES = [ROLE_NONE, ROLE_MANAGER] # Commands allowed in TopicRightsChange. 
TOPIC_RIGHTS_CHANGE_ALLOWED_COMMANDS = [{ 'name': CMD_CREATE_NEW, 'required_attribute_names': [], 'optional_attribute_names': [], 'user_id_attribute_names': [] }, { 'name': CMD_CHANGE_ROLE, 'required_attribute_names': ['assignee_id', 'new_role', 'old_role'], 'optional_attribute_names': [], 'user_id_attribute_names': ['assignee_id'], 'allowed_values': { 'new_role': ALLOWED_TOPIC_ROLES, 'old_role': ALLOWED_TOPIC_ROLES } }, { 'name': CMD_REMOVE_MANAGER_ROLE, 'required_attribute_names': ['removed_user_id'], 'optional_attribute_names': [], 'user_id_attribute_names': ['removed_user_id'] }, { 'name': CMD_PUBLISH_TOPIC, 'required_attribute_names': [], 'optional_attribute_names': [], 'user_id_attribute_names': [] }, { 'name': CMD_UNPUBLISH_TOPIC, 'required_attribute_names': [], 'optional_attribute_names': [], 'user_id_attribute_names': [] }, { 'name': CMD_DELETE_COMMIT, 'required_attribute_names': [], 'optional_attribute_names': [], 'user_id_attribute_names': [] }] USER_ID_RANDOM_PART_LENGTH = 32 USER_ID_LENGTH = 36 USER_ID_REGEX = r'uid_[a-z]{%s}' % USER_ID_RANDOM_PART_LENGTH PSEUDONYMOUS_ID_REGEX = r'pid_[a-z]{%s}' % USER_ID_RANDOM_PART_LENGTH # Length of user PIN for different roles used on Android. FULL_USER_PIN_LENGTH = 5 PROFILE_USER_PIN_LENGTH = 3 MAX_NUMBER_OF_OPS_IN_TRANSACTION = 25 # This is the maximum wait time for the task queue HTTP request. If the request # takes longer than this value, an exception is raised. The default value # of 5 seconds is too short and must be avoided because it can cause events # to go unrecorded. # https://cloud.google.com/appengine/docs/standard/python/outbound-requests#request_timeouts DEFAULT_TASKQUEUE_TIMEOUT_SECONDS = 30 # Mapping from issue type to issue keyname in the issue customization dict. This # mapping is useful to uniquely identify issues by the combination of their # issue type and other type-specific information (such as the list of states # involved). CUSTOMIZATION_ARG_WHICH_IDENTIFIES_ISSUE = { 'EarlyQuit': 'state_name', 'MultipleIncorrectSubmissions': 'state_name', 'CyclicStateTransitions': 'state_names' } # Constants defining various suggestion types. SUGGESTION_TYPE_EDIT_STATE_CONTENT = 'edit_exploration_state_content' SUGGESTION_TYPE_TRANSLATE_CONTENT = 'translate_content' SUGGESTION_TYPE_ADD_QUESTION = 'add_question' # Suggestion fields that can be queried. ALLOWED_SUGGESTION_QUERY_FIELDS = [ 'suggestion_type', 'target_type', 'target_id', 'status', 'author_id', 'final_reviewer_id', 'score_category', 'language_code' ] # Possible targets that the suggestions can modify. SUGGESTION_TARGET_TYPE_CHOICES = [ ENTITY_TYPE_EXPLORATION, ENTITY_TYPE_QUESTION, ENTITY_TYPE_SKILL, ENTITY_TYPE_TOPIC ] # Possible suggestion types. SUGGESTION_TYPE_CHOICES = [ SUGGESTION_TYPE_EDIT_STATE_CONTENT, SUGGESTION_TYPE_TRANSLATE_CONTENT, SUGGESTION_TYPE_ADD_QUESTION ] # The types of suggestions that are offered on the Contributor Dashboard. CONTRIBUTOR_DASHBOARD_SUGGESTION_TYPES = [ SUGGESTION_TYPE_TRANSLATE_CONTENT, SUGGESTION_TYPE_ADD_QUESTION ]
[]
[]
[ "SERVER_SOFTWARE" ]
[]
["SERVER_SOFTWARE"]
python
1
0
idiaptts/src/neural_networks/pytorch/utils.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright (c) 2019 Idiap Research Institute, http://www.idiap.ch/ # Written by Bastian Schnell <[email protected]> # import os import filecmp import torch def equal_iterable(item1, item2): # if torch.is_tensor(item1) and torch.is_tensor(item2): # return bool(item1.ne(item2).sum() == 0) # if isinstance(item1, numpy.ndarray) and isinstance(item2, numpy.ndarray): # return bool(numpy.equal(item1, item2).all()) if isinstance(item1, str) and isinstance(item2, str): return item1 == item2 try: if isinstance(item1, dict): iter1 = item1.values() if isinstance(item2, dict): iter2 = item2.values() else: return False else: iter1 = iter(item1) iter2 = iter(item2) equal = True for value1, value2 in zip(iter1, iter2): equal &= equal_iterable(value1, value2) return equal except TypeError as te: equal = (item1 == item2) # This is basically a check for torch and numpy tensors/arrays. try: iter(equal) return any(equal) except TypeError as te2: return equal def equal_model(model1, model2): for p1, p2 in zip(model1.parameters(), model2.parameters()): if p1.data.ne(p2.data).sum() > 0: return False if p1.grad is not None: if p2.grad is not None: if p1.grad.ne(p2.grad).sum() > 0: return False else: return False elif p2.grad is not None: return False return True def equal_checkpoint(model1_path, model1_suffix, model2_path, model2_suffix): model1_params_path = os.path.join(model1_path, "params_" + model1_suffix) model2_params_path = os.path.join(model2_path, "params_" + model2_suffix) model1_optimiser_path = os.path.join(model1_path, "optimiser_" + model1_suffix) model2_optimiser_path = os.path.join(model2_path, "optimiser_" + model2_suffix) model1_config_path = os.path.join(model1_path, "config.json") model2_config_path = os.path.join(model2_path, "config.json") # Try binary test first. return filecmp.cmp(os.path.join(model1_params_path), os.path.join(model2_params_path), shallow=False) \ and filecmp.cmp(os.path.join(model1_optimiser_path), os.path.join(model2_optimiser_path), shallow=False) \ and (model1_path == model2_path or filecmp.cmp(model1_config_path, model2_config_path, shallow=False)) # # When binary test fails check details manually. The only difference might be the save epoch. # checkpoint1 = torch.load(model1_path) # checkpoint2 = torch.load(model2_path) # if not checkpoint1["model_name"] == checkpoint2["model_name"]: # return False # if not checkpoint1["epoch"] == checkpoint2["epoch"]: # return False # try: # model1 = checkpoint1["model"] # model2 = checkpoint2["model"] # if not equal_model(model1, model2): # return False # except KeyError: # state_dict1 = checkpoint1["model_state_dict"] # state_dict2 = checkpoint2["model_state_dict"] # if not state_dict1.keys() == state_dict2.keys(): # return False # for key, value in state_dict1.items(): # if not (state_dict2[key] == value).all(): # return False # # Backwards compatibility for fully saved optimisers. 
# try: # optimiser1_state_dict = checkpoint1["optimiser"].state_dict() # except KeyError: # optimiser1_state_dict = checkpoint1["optimiser_state_dict"] # try: # optimiser2_state_dict = checkpoint2["optimiser"].state_dict() # except KeyError: # optimiser2_state_dict = checkpoint2["optimiser_state_dict"] # if optimiser1_state_dict is not None: # if optimiser2_state_dict is not None: # return equal_iterable(optimiser1_state_dict["state"], optimiser2_state_dict["state"]) # else: # return False # return True def tensor_pad(tensor: torch.Tensor, target_length: int, dim: int, mode: str = 'constant', value: float = 0.0): ndim = tensor.ndim assert dim < ndim, "Cannot pad dim {} of {}-dimensional tensor.".format( dim, ndim) current_length = tensor.shape[dim] assert current_length <= target_length, "Tensor is longer than padding " \ "length ({} > {}).".format(current_length, target_length) dim_padding = [0, target_length - tensor.shape[dim]] padding = [0, 0] * dim + dim_padding + [0, 0] * (ndim - dim - 1) return torch.nn.functional.pad(tensor, padding, mode=mode, value=value)
[]
[]
[]
[]
[]
python
null
null
null
localstack/services/es/es_starter.py
import os import six import logging import traceback from localstack import config from localstack.services import install from localstack.utils.aws import aws_stack from localstack.constants import DEFAULT_PORT_ELASTICSEARCH_BACKEND, LOCALSTACK_ROOT_FOLDER from localstack.utils.common import run, is_root, mkdir, chmod_r from localstack.services.infra import get_service_protocol, start_proxy_for_service, do_run from localstack.services.install import ROOT_PATH LOGGER = logging.getLogger(__name__) def delete_all_elasticsearch_data(): """ This function drops ALL data in the local Elasticsearch data folder. Use with caution! """ data_dir = os.path.join(LOCALSTACK_ROOT_FOLDER, 'infra', 'elasticsearch', 'data', 'elasticsearch', 'nodes') run('rm -rf "%s"' % data_dir) def start_elasticsearch(port=None, delete_data=True, asynchronous=False, update_listener=None): port = port or config.PORT_ELASTICSEARCH # delete Elasticsearch data that may be cached locally from a previous test run delete_all_elasticsearch_data() install.install_elasticsearch() backend_port = DEFAULT_PORT_ELASTICSEARCH_BACKEND es_data_dir = '%s/infra/elasticsearch/data' % (ROOT_PATH) es_tmp_dir = '%s/infra/elasticsearch/tmp' % (ROOT_PATH) es_mods_dir = '%s/infra/elasticsearch/modules' % (ROOT_PATH) if config.DATA_DIR: es_data_dir = '%s/elasticsearch' % config.DATA_DIR # Elasticsearch 5.x cannot be bound to 0.0.0.0 in some Docker environments, # hence we use the default bind address 127.0.0.0 and put a proxy in front of it cmd = (('%s/infra/elasticsearch/bin/elasticsearch ' + '-E http.port=%s -E http.publish_port=%s -E http.compression=false ' + '-E path.data=%s') % (ROOT_PATH, backend_port, backend_port, es_data_dir)) if os.path.exists(os.path.join(es_mods_dir, 'x-pack-ml')): cmd += ' -E xpack.ml.enabled=false' env_vars = { 'ES_JAVA_OPTS': os.environ.get('ES_JAVA_OPTS', '-Xms200m -Xmx600m'), 'ES_TMPDIR': es_tmp_dir } print('Starting local Elasticsearch (%s port %s)...' % (get_service_protocol(), port)) if delete_data: run('rm -rf %s' % es_data_dir) # fix permissions chmod_r('%s/infra/elasticsearch' % ROOT_PATH, 0o777) mkdir(es_data_dir) chmod_r(es_data_dir, 0o777) mkdir(es_tmp_dir) chmod_r(es_tmp_dir, 0o777) # start proxy and ES process start_proxy_for_service('elasticsearch', port, backend_port, update_listener, quiet=True, params={'protocol_version': 'HTTP/1.0'}) if is_root(): cmd = "su localstack -c '%s'" % cmd thread = do_run(cmd, asynchronous, env_vars=env_vars) return thread def check_elasticsearch(expect_shutdown=False, print_error=False): out = None try: # check Elasticsearch es = aws_stack.connect_elasticsearch() out = es.cat.aliases() except Exception as e: if print_error: LOGGER.error('Elasticsearch health check failed (retrying...): %s %s' % (e, traceback.format_exc())) if expect_shutdown: assert out is None else: assert isinstance(out, six.string_types)
[]
[]
[ "ES_JAVA_OPTS" ]
[]
["ES_JAVA_OPTS"]
python
1
0
cmd/pod-restarts-check/main.go
// Package podRestarts implements a checking tool for pods that are // restarting too much. package main import ( "context" "os" "path/filepath" "strconv" "strings" "time" v1 "k8s.io/api/core/v1" log "github.com/sirupsen/logrus" "k8s.io/client-go/kubernetes" _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" checkclient "github.com/kuberhealthy/kuberhealthy/v2/pkg/checks/external/checkclient" "github.com/kuberhealthy/kuberhealthy/v2/pkg/kubeClient" ) const defaultMaxFailuresAllowed = 10 const defaultCheckTimeout = 10 * time.Minute // KubeConfigFile is a variable containing file path of Kubernetes config files var KubeConfigFile = filepath.Join(os.Getenv("HOME"), ".kube", "config") // Namespace is a variable to allow code to target all namespaces or a single namespace var Namespace string // CheckTimeout is a variable for how long code should run before it should retry. var CheckTimeout time.Duration // MaxFailuresAllowed is a variable for how many times the pod should retry before stopping. var MaxFailuresAllowed int32 // Checker represents a long running pod restart checker. type Checker struct { Namespace string MaxFailuresAllowed int32 BadPods map[string]string client *kubernetes.Clientset } func init() { // Grab and verify environment variables and set them as global vars Namespace = os.Getenv("POD_NAMESPACE") if Namespace == "" { log.Infoln("Looking for pods across all namespaces, this requires a cluster role") // it is the same value but we are being explicit that we are listing pods in all namespaces Namespace = v1.NamespaceAll } else { log.Infoln("Looking for pods in namespace:", Namespace) } // Set check time limit to default CheckTimeout = defaultCheckTimeout // Get the deadline time in unix from the env var timeDeadline, err := checkclient.GetDeadline() if err != nil { log.Infoln("There was an issue getting the check deadline:", err.Error()) } CheckTimeout = timeDeadline.Sub(time.Now().Add(time.Second * 5)) log.Infoln("Check time limit set to:", CheckTimeout) MaxFailuresAllowed = defaultMaxFailuresAllowed maxFailuresAllowed := os.Getenv("MAX_FAILURES_ALLOWED") if len(maxFailuresAllowed) != 0 { conversion, err := strconv.ParseInt(maxFailuresAllowed, 10, 32) MaxFailuresAllowed = int32(conversion) if err != nil { log.Errorln("Error converting maxFailuresAllowed:", maxFailuresAllowed, "to int, err:", err) return } } } func main() { // Create client client, err := kubeClient.Create(KubeConfigFile) if err != nil { log.Fatalln("Unable to create kubernetes client", err) } // Create new pod restarts checker with Kubernetes client prc := New(client) // Run check err = prc.Run() if err != nil { log.Errorln("Error running Pod Restarts check:", err) os.Exit(2) } log.Infoln("Done running Pod Restarts check") os.Exit(0) } // New creates a new pod restart checker for a specific namespace, ready to use. func New(client *kubernetes.Clientset) *Checker { return &Checker{ Namespace: Namespace, MaxFailuresAllowed: MaxFailuresAllowed, BadPods: make(map[string]string), client: client, } } // Run starts the go routine to run checks, reports whether or not the check completely successfully, and finally checks // for any errors in the Checker struct and re func (prc *Checker) Run() error { // TODO: refactor function to receive context on exported function in next breaking change. 
ctx := context.TODO() log.Infoln("Running Pod Restarts checker") doneChan := make(chan error) // run the check in a goroutine and notify the doneChan when completed go func(doneChan chan error) { err := prc.doChecks(ctx) doneChan <- err }(doneChan) // wait for either a timeout or job completion select { case <-time.After(CheckTimeout): // The check has timed out after its specified timeout period errorMessage := "Failed to complete Pod Restart check in time! Timeout was reached." err := reportKHFailure([]string{errorMessage}) if err != nil { return err } return err case err := <-doneChan: if len(prc.BadPods) != 0 || err != nil { var errorMessages []string if err != nil { log.Error(err) errorMessages = append(errorMessages, err.Error()) } for _, msg := range prc.BadPods { errorMessages = append(errorMessages, msg) } return reportKHFailure(errorMessages) } return reportKHSuccess() } } // doChecks grabs all events in a given namespace, then checks for pods with event type "Warning" with reason "BackOff", // and an event count greater than the MaxFailuresAllowed. If any of these pods are found, an error message is appended // to Checker struct errorMessages. func (prc *Checker) doChecks(ctx context.Context) error { log.Infoln("Checking for pod BackOff events for all pods in the namespace:", prc.Namespace) podWarningEvents, err := prc.client.CoreV1().Events(prc.Namespace).List(ctx, metav1.ListOptions{FieldSelector: "type=Warning"}) if err != nil { return err } if len(podWarningEvents.Items) != 0 { log.Infoln("Found `Warning` events in the namespace:", prc.Namespace) for _, event := range podWarningEvents.Items { // Checks for pods with BackOff events greater than the MaxFailuresAllowed if event.InvolvedObject.Kind == "Pod" && event.Reason == "BackOff" && event.Count > prc.MaxFailuresAllowed { errorMessage := "Found: " + strconv.FormatInt(int64(event.Count), 10) + " `BackOff` events for pod: " + event.InvolvedObject.Name + " in namespace: " + event.Namespace log.Infoln(errorMessage) // We could be checking for pods in all namespaces so prefix the namespace prc.BadPods[event.InvolvedObject.Namespace+"/"+event.InvolvedObject.Name] = errorMessage } } } for pod := range prc.BadPods { err := prc.verifyBadPodRestartExists(ctx, pod) if err != nil { return err } } return err } // verifyBadPodRestartExists removes the bad pod found from the events list if the pod no longer exists func (prc *Checker) verifyBadPodRestartExists(ctx context.Context, pod string) error { // Pod is in the form namespace/pod_name parts := strings.Split(pod, "/") namespace := parts[0] podName := parts[1] _, err := prc.client.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{}) if err != nil { if k8sErrors.IsNotFound(err) || strings.Contains(err.Error(), "not found") { log.Infoln("Bad Pod:", podName, "no longer exists. 
Removing from bad pods map") delete(prc.BadPods, podName) } else { log.Infoln("Error getting bad pod:", podName, err) return err } } return nil } // reportKHSuccess reports success to Kuberhealthy servers and verifies the report successfully went through func reportKHSuccess() error { err := checkclient.ReportSuccess() if err != nil { log.Println("Error reporting success to Kuberhealthy servers:", err) return err } log.Println("Successfully reported success to Kuberhealthy servers") return err } // reportKHFailure reports failure to Kuberhealthy servers and verifies the report successfully went through func reportKHFailure(errorMessages []string) error { err := checkclient.ReportFailure(errorMessages) if err != nil { log.Println("Error reporting failure to Kuberhealthy servers:", err) return err } log.Println("Successfully reported failure to Kuberhealthy servers") return err }
[ "\"HOME\"", "\"POD_NAMESPACE\"", "\"MAX_FAILURES_ALLOWED\"" ]
[]
[ "POD_NAMESPACE", "HOME", "MAX_FAILURES_ALLOWED" ]
[]
["POD_NAMESPACE", "HOME", "MAX_FAILURES_ALLOWED"]
go
3
0
components/kyma-environment-broker/internal/storage/driver/postsql/instance.go
package postsql import ( "github.com/kyma-incubator/compass/components/kyma-environment-broker/internal" "github.com/kyma-incubator/compass/components/kyma-environment-broker/internal/storage/dberr" "github.com/kyma-incubator/compass/components/kyma-environment-broker/internal/storage/dbsession" "github.com/pkg/errors" log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/wait" ) type Instance struct { dbsession.Factory } func NewInstance(sess dbsession.Factory) *Instance { return &Instance{ Factory: sess, } } // TODO: Wrap retries in single method WithRetries func (s *Instance) GetByID(instanceID string) (*internal.Instance, error) { sess := s.NewReadSession() instance := &internal.Instance{} err := wait.Poll(defaultRetryInterval, defaultRetryTimeout, func() (bool, error) { inst, err := sess.GetInstanceByID(instanceID) if err != nil { if err.Code() == dberr.CodeNotFound { return false, dberr.NotFound("Instance with id %s not exist", instanceID) } log.Warn(errors.Wrapf(err, "while getting instance by ID %s", instanceID).Error()) return false, nil } instance = &inst return true, nil }) return instance, err } func (s *Instance) Insert(instance internal.Instance) error { _, err := s.GetByID(instance.InstanceID) if err == nil { return dberr.AlreadyExists("instance with id %s already exist", instance.InstanceID) } sess := s.NewWriteSession() return wait.Poll(defaultRetryInterval, defaultRetryTimeout, func() (bool, error) { err := sess.InsertInstance(instance) if err != nil { log.Warn(errors.Wrapf(err, "while saving instance ID %s", instance.InstanceID).Error()) return false, nil } return true, nil }) } func (s *Instance) Update(instance internal.Instance) error { sess := s.NewWriteSession() return wait.Poll(defaultRetryInterval, defaultRetryTimeout, func() (bool, error) { err := sess.UpdateInstance(instance) if err != nil { if err.Code() == dberr.CodeNotFound { return false, dberr.NotFound("Instance with id %s not exist", instance.InstanceID) } log.Warn(errors.Wrapf(err, "while updating instance ID %s", instance.InstanceID).Error()) return false, nil } return true, nil }) } func (s *Instance) Delete(instanceID string) error { sess := s.NewWriteSession() return sess.DeleteInstance(instanceID) }
[]
[]
[]
[]
[]
go
null
null
null
official/cv/unet3d/eval.py
# Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ import os import numpy as np from mindspore import dtype as mstype from mindspore import Model, context, Tensor from mindspore.train.serialization import load_checkpoint, load_param_into_net from src.dataset import create_dataset from src.unet3d_model import UNet3d, UNet3d_ from src.utils import create_sliding_window, CalculateDice from src.model_utils.config import config from src.model_utils.moxing_adapter import moxing_wrapper device_id = int(os.getenv('DEVICE_ID')) context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target, save_graphs=False, device_id=device_id) @moxing_wrapper() def test_net(data_path, ckpt_path): data_dir = data_path + "/image/" seg_dir = data_path + "/seg/" eval_dataset = create_dataset(data_path=data_dir, seg_path=seg_dir, is_training=False) eval_data_size = eval_dataset.get_dataset_size() print("train dataset length is:", eval_data_size) if config.device_target == 'Ascend': network = UNet3d() else: network = UNet3d_() network.set_train(False) param_dict = load_checkpoint(ckpt_path) load_param_into_net(network, param_dict) model = Model(network) index = 0 total_dice = 0 for batch in eval_dataset.create_dict_iterator(num_epochs=1, output_numpy=True): image = batch["image"] seg = batch["seg"] print("current image shape is {}".format(image.shape), flush=True) sliding_window_list, slice_list = create_sliding_window(image, config.roi_size, config.overlap) image_size = (config.batch_size, config.num_classes) + image.shape[2:] output_image = np.zeros(image_size, np.float32) count_map = np.zeros(image_size, np.float32) importance_map = np.ones(config.roi_size, np.float32) for window, slice_ in zip(sliding_window_list, slice_list): window_image = Tensor(window, mstype.float32) pred_probs = model.predict(window_image) output_image[slice_] += pred_probs.asnumpy() count_map[slice_] += importance_map output_image = output_image / count_map dice, _ = CalculateDice(output_image, seg) print("The {} batch dice is {}".format(index, dice), flush=True) total_dice += dice index = index + 1 avg_dice = total_dice / eval_data_size print("**********************End Eval***************************************") print("eval average dice is {}".format(avg_dice)) if __name__ == '__main__': test_net(data_path=config.data_path, ckpt_path=config.checkpoint_file_path)
[]
[]
[ "DEVICE_ID" ]
[]
["DEVICE_ID"]
python
1
0
util/kustomize/kustomize_test.go
package kustomize import ( "fmt" "io/ioutil" "os" "path" "path/filepath" "testing" "github.com/argoproj/pkg/exec" "github.com/stretchr/testify/assert" "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1" ) // TODO: move this into shared test package after resolving import cycle const ( // This is a throwaway gitlab test account/repo with a read-only personal access token for the // purposes of testing private git repos PrivateGitUsername = "blah" PrivateGitPassword = "B5sBDeoqAVUouoHkrovy" ) const kustomization1 = "kustomization_yaml" const kustomization2a = "kustomization_yml" const kustomization2b = "Kustomization" func testDataDir() (string, error) { res, err := ioutil.TempDir("", "kustomize-test") if err != nil { return "", err } _, err = exec.RunCommand("cp", "-r", "./testdata/"+kustomization1, filepath.Join(res, "testdata")) if err != nil { return "", err } return path.Join(res, "testdata"), nil } func TestKustomizeBuild(t *testing.T) { appPath, err := testDataDir() assert.Nil(t, err) namePrefix := "namePrefix-" kustomize := NewKustomizeApp(appPath, nil) kustomizeSource := v1alpha1.ApplicationSourceKustomize{ NamePrefix: namePrefix, ImageTags: []v1alpha1.KustomizeImageTag{ { Name: "k8s.gcr.io/nginx-slim", Value: "latest", }, }, Images: []string{"nginx:1.15.5"}, } objs, imageTags, images, err := kustomize.Build(&kustomizeSource) assert.Nil(t, err) if err != nil { assert.Equal(t, len(objs), 2) assert.Equal(t, len(imageTags), 0) assert.Equal(t, len(images), 2) } for _, obj := range objs { switch obj.GetKind() { case "StatefulSet": assert.Equal(t, namePrefix+"web", obj.GetName()) case "Deployment": assert.Equal(t, namePrefix+"nginx-deployment", obj.GetName()) } } for _, image := range images { switch image { case "nginx": assert.Equal(t, "1.15.5", image) case "k8s.gcr.io/nginx-slim": assert.Equal(t, "latest", image) } } } func TestFindKustomization(t *testing.T) { testFindKustomization(t, kustomization1, "kustomization.yaml") testFindKustomization(t, kustomization2a, "kustomization.yml") testFindKustomization(t, kustomization2b, "Kustomization") } func testFindKustomization(t *testing.T, set string, expected string) { kustomization, err := (&kustomize{path: "testdata/" + set}).findKustomization() assert.Nil(t, err) assert.Equal(t, "testdata/"+set+"/"+expected, kustomization) } func TestGetKustomizationVersion(t *testing.T) { testGetKustomizationVersion(t, kustomization1, 1) testGetKustomizationVersion(t, kustomization2a, 2) testGetKustomizationVersion(t, kustomization2b, 2) } func testGetKustomizationVersion(t *testing.T, set string, expected int) { version, err := (&kustomize{path: "testdata/" + set}).getKustomizationVersion() assert.Nil(t, err) assert.Equal(t, expected, version) } func TestGetCommandName(t *testing.T) { assert.Equal(t, "kustomize1", GetCommandName(1)) assert.Equal(t, "kustomize", GetCommandName(2)) } func TestIsKustomization(t *testing.T) { assert.True(t, IsKustomization("kustomization.yaml")) assert.True(t, IsKustomization("kustomization.yml")) assert.True(t, IsKustomization("Kustomization")) assert.False(t, IsKustomization("rubbish.yml")) } // TestPrivateRemoteBase verifies we can supply git credentials to a private remote base func TestPrivateRemoteBase(t *testing.T) { os.Setenv("GIT_CONFIG_NOSYSTEM", "true") defer os.Unsetenv("GIT_CONFIG_NOSYSTEM") // add the hack path which has the git-ask-pass.sh shell script osPath := os.Getenv("PATH") hackPath, err := filepath.Abs("../../hack") assert.NoError(t, err) err = os.Setenv("PATH", fmt.Sprintf("%s:%s", osPath, 
hackPath)) assert.NoError(t, err) defer func() { _ = os.Setenv("PATH", osPath) }() kust := NewKustomizeApp("./testdata/private-remote-base", &GitCredentials{Username: PrivateGitUsername, Password: PrivateGitPassword}) objs, _, _, err := kust.Build(nil) assert.NoError(t, err) assert.Len(t, objs, 2) }
[ "\"PATH\"" ]
[]
[ "PATH" ]
[]
["PATH"]
go
1
0
example/go/demo/main.go
package main import ( "log" "math/rand" "net/http" "os" "runtime" "sync" "time" "github.com/jpillora/velox" ) //debug enables goroutine and memory counters const debug = false type Foo struct { //required velox state, adds sync state and a Push() method velox.State //optional mutex, prevents race conditions (foo.Push will make use of the sync.Locker interface) sync.Mutex NumConnections int NumGoRoutines int `json:",omitempty"` AllocMem float64 `json:",omitempty"` A, B int C map[string]int D Bar } type Bar struct { X, Y int } func main() { //state we wish to sync foo := &Foo{A: 21, B: 42, C: map[string]int{}} go func() { i := 0 for { //change foo foo.Lock() foo.A++ if i%2 == 0 { foo.B-- } i++ foo.C[string('A'+rand.Intn(26))] = i if i%2 == 0 { j := 0 rmj := rand.Intn(len(foo.C)) for k := range foo.C { if j == rmj { delete(foo.C, k) break } j++ } } if i%5 == 0 { foo.D.X-- foo.D.Y++ } foo.NumConnections = foo.State.NumConnections() //show number of connections 'foo' is currently handling foo.Unlock() //push to all connections foo.Push() //do other stuff... time.Sleep(250 * time.Millisecond) } }() //show memory/goroutine stats if debug { go func() { mem := &runtime.MemStats{} i := 0 for { foo.NumGoRoutines = runtime.NumGoroutine() runtime.ReadMemStats(mem) foo.AllocMem = float64(mem.Alloc) time.Sleep(100 * time.Millisecond) i++ // if i%10 == 0 { runtime.GC() } foo.Push() } }() } //sync handlers http.Handle("/velox.js", velox.JS) http.Handle("/sync", velox.SyncHandler(foo)) //index handler http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/html") w.Write(indexhtml) }) //listen! port := os.Getenv("PORT") if port == "" { port = "3000" } log.Printf("Listening on :%s...", port) s := http.Server{ Addr: ":" + port, } log.Fatal(s.ListenAndServe()) } var indexhtml = []byte(` <!-- documentation --> Client:<br> <pre id="code">Status: &lt;div>&lt;b id="status">disconnected&lt;/b>&lt;/div> &lt;pre id="example">&lt;/pre> &lt;script src="/velox.js">&lt;/script> &lt;script> var foo = {}; var v = velox("/sync", foo); v.onchange = function(isConnected) { document.querySelector("#status").innerHTML = isConnected ? "connected" : "disconnected"; }; v.onupdate = function() { document.querySelector("#example").innerHTML = JSON.stringify(foo, null, 2); }; &lt;/script> </pre> <a href="https://github.com/jpillora/velox"><img style="position: absolute; z-index: 2; top: 0; right: 0; border: 0;" src="https://s3.amazonaws.com/github/ribbons/forkme_right_darkblue_121621.png" alt="Fork me on GitHub"></a> <hr> Server:<br> <a href="https://github.com/jpillora/velox/blob/master/example/go/demo/main.go" target="_blank"> https://github.com/jpillora/velox/blob/master/example/go/demo/main.go </a> <hr> <!-- example --> <div>Status: <b id="status">disconnected</b></div> <pre id="example"></pre> <script src="/velox.js?dev=1"></script> <script> var foo = {}; var v = velox("/sync", foo); v.onchange = function(isConnected) { document.querySelector("#status").innerHTML = isConnected ? "connected" : "disconnected"; }; v.onupdate = function() { document.querySelector("#example").innerHTML = JSON.stringify(foo, null, 2); }; </script> `)
[ "\"PORT\"" ]
[]
[ "PORT" ]
[]
["PORT"]
go
1
0
LAMP.py
#!/usr/bin/python from __future__ import division from __future__ import print_function """ This file serves as an example of how to a) select a problem to be solved b) select a network type c) train the network to minimize recovery MSE """ import numpy as np import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # BE QUIET!!!! os.environ['KMP_DUPLICATE_LIB_OK']='True' import tensorflow as tf np.random.seed(1) # numpy is good about making repeatable output tf.set_random_seed(1) # on the other hand, this is basically useless (see issue 9171) # import our problems, networks and training modules from tools import problems,networks,train L=10000 M=250 N=500 SNR=20 pnz=.1 untied=False T=8 shrink='bg' # Create the basic problem structure. prob = problems.bernoulli_gaussian_trial(kappa=None,M=M,N=N,L=L,pnz=pnz,SNR=SNR) #a Bernoulli-Gaussian x, noisily observed through a random matrix #prob = problems.random_access_problem(2) # 1 or 2 for compressive random access or massive MIMO print('Problem created ...') print('A is:') print(prob.A) # build a LAMP network to solve the problem and get the intermediate results so we can greedily extend and then refine(fine-tune) layers = networks.build_LAMP(prob,T=T,shrink=shrink,untied=False) print('Building layers ... done') # plan the learning training_stages = train.setup_training(layers,prob,trinit=1e-3,refinements=(.5,.1,.01) ) print('Plan the learning ... done') # do the learning (takes a while) print('Do the learning (takes a while)') # sess = train.do_training(training_stages,prob,'LAMP_bg_giid.npz') sess = train.do_training(training_stages,prob,'LAMP_bg_giid.npz',10,10,50) # train.plot_estimate_to_test_message(sess, training_stages, prob, 'LAMP_bg_giid.npz' ) # train.test_vector_sizes(sess, training_stages, prob, 'LAMP_bg_giid.npz' ) print('Evaluating network on test data ...') train.evaluate_nmse(sess, training_stages, prob, 'LAMP_bg_giid.npz',SNR=SNR, L=L) train_vars = train.get_train_variables(sess) stop = 1;
[]
[]
[ "KMP_DUPLICATE_LIB_OK", "TF_CPP_MIN_LOG_LEVEL" ]
[]
["KMP_DUPLICATE_LIB_OK", "TF_CPP_MIN_LOG_LEVEL"]
python
2
0
tools/testing/resultdb/main_test.go
// Copyright 2020 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package main import ( "flag" "log" "os" "path/filepath" "testing" sinkpb "go.chromium.org/luci/resultdb/sink/proto/v1" ) var testDataFlag = flag.String("test_data_dir", "testdata", "Path to testdata/; only used in GN build") func TestGetLUCICtx(t *testing.T) { old := os.Getenv("LUCI_CONTEXT") defer os.Setenv("LUCI_CONTEXT", old) os.Setenv("LUCI_CONTEXT", filepath.Join(*testDataFlag, "lucictx.json")) ctx, err := resultSinkCtx() if err != nil { t.Errorf("Cannot parse LUCI_CONTEXT: %v", err) } if ctx.ResultSinkAddr != "result.sink" { t.Errorf("Incorrect value parsed for result_sink address. Got %s", ctx.ResultSinkAddr) } if ctx.AuthToken != "token" { t.Errorf("Incorrect value parsed for result_sink auth_token field. Got %s", ctx.AuthToken) } } func TestParse2Summary(t *testing.T) { t.Parallel() const chunkSize = 5 var requests []*sinkpb.ReportTestResultsRequest expectRequests := 0 for _, name := range []string{"summary.json", "summary2.json"} { summary, err := ParseSummary(filepath.Join(*testDataFlag, name)) if err != nil { log.Fatal(err) } testResults := SummaryToResultSink(summary, name) expectRequests += (len(testResults)-1)/chunkSize + 1 requests = append(requests, createTestResultsRequests(testResults, chunkSize)...) for _, testResult := range testResults { if len(testResult.TestId) == 0 { t.Errorf("Empty testId is not allowed.") } } } if len(requests) != expectRequests { t.Errorf("Incorrect number of request chuncks, got: %d want %d", len(requests), expectRequests) } }
[ "\"LUCI_CONTEXT\"" ]
[]
[ "LUCI_CONTEXT" ]
[]
["LUCI_CONTEXT"]
go
1
0
app.py
#!/usr/bin/env python import urllib import json import os import time from flask import Flask from flask import request from flask import make_response # Flask app should start in global layout app = Flask(__name__) @app.route('/webhook', methods=['POST']) def webhook(): req = request.get_json(silent=True, force=True) print("Request:") print(json.dumps(req, indent=4)) res = makeWebhookResult(req) res = json.dumps(res, indent=4) print(res) r = make_response(res) r.headers['Content-Type'] = 'application/json' return r def makeWebhookResult(req): if req.get("result").get("action") != "shipping.cost": return {} result = req.get("result") parameters = result.get("parameters") zone = parameters.get("shipping-zone") cost = {'Europe':100, 'North America':200, 'South America':300, 'Asia':400, 'Africa':500} #speech = "The cost of shipping to " + zone + " is " + str(cost[zone]) + " euros." speech="You need any help Brother??" print("Response:") print(speech) return { "speech": speech, "displayText": speech, #"data": {}, # "contextOut": [], "source": "apiai-onlinestore-shipping" } if __name__ == '__main__': port = int(os.getenv('PORT', 5000)) print("Starting app on port %d" % port) app.run(debug=True, port=port, host='0.0.0.0')
[]
[]
[ "PORT" ]
[]
["PORT"]
python
1
0
users/views.py
from django.urls import reverse, reverse_lazy from django.views.generic import DetailView, ListView from django.views.generic.edit import UpdateView, DeleteView from django.http import Http404 from django.contrib import auth from django.shortcuts import get_object_or_404 def can_edit_user(logged_user, target_user): """ Is logged in user allowed to edit target user """ if logged_user == target_user: return True if logged_user.is_staff: return True return False class CvUserList(ListView): model = auth.get_user_model() def get_context_data(self, **kwargs): context = super(CvUserList, self).get_context_data(**kwargs) context['messages'] = self.request.GET.get('message', '') return context class CvUserDetail(DetailView): template_name = 'auth/profile.html' context_object_name = 'target_user' def get_object(self, queryset=None): target_username = self.kwargs.get('slug', '') if target_username: target_user = get_object_or_404(auth.get_user_model(), username=target_username) return target_user return auth.get_user(self.request) class CvUserUpdate(UpdateView): model = auth.get_user_model() slug_field = 'username' fields = ['first_name', 'last_name'] def get_object(self): target_user = super(CvUserUpdate, self).get_object() if can_edit_user(logged_user=self.request.user, target_user=target_user): return target_user # Todo: Smarter way to handle this raise Http404 def get_context_data(self, **kwargs): context = super(CvUserUpdate, self).get_context_data(**kwargs) context['message'] = self.request.GET.get('message', '') return context def get_success_url(self): if self.object: return reverse_lazy('user_detail', args=[self.object.username]) else: return reverse('user_list') class CvUserDelete(DeleteView): slug_field = 'username' model = auth.models.User success_url = reverse_lazy('home') def get_object(self): target_user = super(CvUserDelete, self).get_object() if can_edit_user(logged_user=self.request.user, target_user=target_user): return target_user # Todo: Smarter way to handle this raise Http404 def render_to_response(self, context, **response_kwargs): return super(CvUserDelete, self).render_to_response(context, **response_kwargs)
[]
[]
[]
[]
[]
python
null
null
null
settings.py
from dotenv import load_dotenv import os load_dotenv() DISCORD_CLIENT = os.getenv('DISCORD_CLIENT') DISCORD_GUILD = os.getenv('DISCORD_GUILD') DISCORD_CHANNEL = os.getenv('DISCORD_CHANNEL') WORLDCOIN = os.getenv('WORLDCOIN')
[]
[]
[ "DISCORD_CLIENT", "WORLDCOIN", "DISCORD_CHANNEL", "DISCORD_GUILD" ]
[]
["DISCORD_CLIENT", "WORLDCOIN", "DISCORD_CHANNEL", "DISCORD_GUILD"]
python
4
0
aws/cloudformation/resources/zipToS3BucketResourceTest_test.go
package resources import ( "encoding/json" "os" "testing" "time" gocf "github.com/mweagle/go-cloudformation" "github.com/rs/zerolog" ) func testEnabled() bool { return os.Getenv("TEST_SRC_S3_KEY") != "" } func mockZipResourceEvent(t *testing.T) *CloudFormationLambdaEvent { props := map[string]interface{}{ "DestBucket": gocf.String(os.Getenv("TEST_DEST_S3_BUCKET")), "SrcBucket": gocf.String(os.Getenv("TEST_SRC_S3_BUCKET")), "SrcKeyName": gocf.String(os.Getenv("TEST_SRC_S3_KEY")), "Manifest": map[string]interface{}{ "Some": "Data", }, } bytes, bytesErr := json.Marshal(props) if bytesErr != nil { t.Fatalf("Failed to serialize mock custom resource event") } return &CloudFormationLambdaEvent{ RequestType: CreateOperation, RequestID: time.Now().String(), StackID: "1234567890", LogicalResourceID: "logicalID", ResourceProperties: json.RawMessage(bytes), } } func TestUnzip(t *testing.T) { if !testEnabled() { return } resUnzip := gocf.NewResourceByType(ZipToS3Bucket) zipResource := resUnzip.(*ZipToS3BucketResource) event := mockZipResourceEvent(t) // Put it logger := zerolog.New(os.Stdout).With().Timestamp().Logger() awsSession := awsSession(&logger) createOutputs, createError := zipResource.Create(awsSession, event, &logger) if nil != createError { t.Errorf("Failed to create Unzip resource: %s", createError) } t.Logf("TestUnzip outputs: %#v", createOutputs) deleteOutputs, deleteError := zipResource.Delete(awsSession, event, &logger) if nil != deleteError { t.Errorf("Failed to create Unzip resource: %s", createError) } t.Logf("TestUnzip outputs: %#v", deleteOutputs) }
[ "\"TEST_SRC_S3_KEY\"", "\"TEST_DEST_S3_BUCKET\"", "\"TEST_SRC_S3_BUCKET\"", "\"TEST_SRC_S3_KEY\"" ]
[]
[ "TEST_SRC_S3_KEY", "TEST_SRC_S3_BUCKET", "TEST_DEST_S3_BUCKET" ]
[]
["TEST_SRC_S3_KEY", "TEST_SRC_S3_BUCKET", "TEST_DEST_S3_BUCKET"]
go
3
0
app.py
# from datetime import datetime import os from flask import Flask, request, jsonify from flask_sqlalchemy import SQLAlchemy app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL'] app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False db = SQLAlchemy(app) class TinyWebDB(db.Model): __tablename__ = 'tinywebdb' tag = db.Column(db.String, primary_key=True, nullable=False) value = db.Column(db.String, nullable=False) # The 'date' column is needed for deleting older entries, so not really required # date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow) db.create_all() db.session.commit() ## COMMUN functions ########################## def store_a_value(tag, value): if tag: # Prevent Duplicate Key error by updating the existing tag existing_tag = TinyWebDB.query.filter_by(tag=tag).first() if existing_tag: #return 'EXISTING ' + tag + ', ' + value existing_tag.value = value db.session.commit() else: #return 'NEW ' + tag + ', ' + value data = TinyWebDB(tag=tag, value=value) db.session.add(data) db.session.commit() return jsonify(['STORED', tag, value]) return 'Invalid Tag!' def add_item_to_tag_value(tag, item): if tag: existing_tag = TinyWebDB.query.filter_by(tag=tag).first() if existing_tag: current_value = existing_tag.value if isinstance(current_value, str): new_value = current_value[0:len(current_value)-1] new_value += ',' + str(item) + ']' #return tag + ', ' + item + ', ' + current_value + ', ' + new_value existing_tag.value = new_value db.session.commit() return jsonify(['ADDED', tag, new_value]) else: return 'Invalid value format!' return 'Invalid Tag!' ## WEB APP ########################## @app.route('/') def hello_world(): tag = 'appinventor_user_actionable_scores_ranking' return 'Hello, I\'m UP!' @app.route('/storeavalue', methods=['POST']) #OK def store_a_value_POST(): tag = request.form['tag'] value = request.form['value'] return store_a_value(tag, value) @app.route('/getvalue', methods=['POST']) #OK def get_value(): tag = request.form['tag'] if tag: value = TinyWebDB.query.filter_by(tag=tag).first().value return jsonify(['VALUE', tag, value]) return 'Invalid Tag!' @app.route('/deleteentry') def delete_entry(): # docs = db.search(User.name == 'John') # for doc in docs: # db.session.remove(where('value') == '') # db.session.commit() # return 'Empty entries have been deleted!' return 'Not yet implemented!' 
@app.route('/actionable/user/<user>') # OK def get_scores(user): tag = 'appinventor_user_actionable_scores_' + user #request.form['tag'] nb_play = 0 sum_play = 0 average = 0.00 if tag: value = TinyWebDB.query.filter_by(tag=tag).first().value.replace("[", "").replace("]", "").split(','); if value: nb_play = len(value) for v in value: sum_play = sum_play + int(v) nb_play = len(value) average = format(sum_play/nb_play, '.2f') return jsonify(['VALUE', 'nb', nb_play, 'sum', sum_play, 'average', average]) else: return 'Invalid user: '+user return 'User name missing: ' @app.route('/actionable/getuseraverage', methods=['POST']) #OK def get_user_average(): user = request.form['user'] tag = 'appinventor_user_actionable_scores_' + user nb_play = 0 sum_play = 0 average = 0.00 if tag: value = TinyWebDB.query.filter_by(tag=tag).first().value.replace("[", "").replace("]", "").split(','); nb_play = len(value) for v in value: sum_play = sum_play + int(v) nb_play = len(value) average = format(sum_play/nb_play, '.2f') return jsonify(['VALUE', 'nb', nb_play, 'sum', sum_play, 'average', average]) return 'Invalid user: '+user @app.route('/actionable/getranking') #, methods=['GET', 'POST']) #OK if users list is good def get_ranking(): board = [] tag = 'appinventor_user_actionable_scores_ranking' users = TinyWebDB.query.filter_by(tag=tag).first().value; if users: users = users.replace("[", "").replace("]", "").replace('"', '').split(',') for user in users: tag = 'appinventor_user_actionable_scores_' + user nb_play = 0 sum_play = 0 average = 0.00 existing_tag = TinyWebDB.query.filter_by(tag=tag).first(); if existing_tag: value = existing_tag.value if value.find(',')>-1: value = value.replace("[", "").replace("]", "").split(',') nb_play = len(value) for v in value: sum_play = sum_play + int(v) nb_play = len(value) average = format(sum_play/nb_play, '.2f') board.append([user, 'nb', nb_play, 'sum', sum_play, 'average', average]) #board.append(value) return jsonify(board) @app.route('/actionable/storeascore', methods=['POST']) #OK def store_a_score(): user = request.form['user'] score = int(request.form['score']) tag = 'appinventor_user_actionable_scores_' + user existing_tag = TinyWebDB.query.filter_by(tag=tag).first(); if existing_tag: return add_item_to_tag_value(tag, score) else: return store_a_value(tag, '[' + str(score) + ']') @app.route('/actionable/create/user', methods=['POST']) #OK def actionable_create_user(): user = request.form['user'] #tag = 'appinventor_user_actionable_scores_' + user #empty_scores = '[]' #store_a_value(tag, empty_scores) tag = 'appinventor_user_actionable_scores_ranking' return add_item_to_tag_value(tag, '"'+user+'"') if __name__ == '__main__': app.run()
[]
[]
[ "DATABASE_URL" ]
[]
["DATABASE_URL"]
python
1
0
graphwalker/test/interaction_test.py
# -*- coding: utf-8 -*- # Copyright (c) 2013 Spotify AB import os import subprocess import unittest from graphwalker import planning from graphwalker import halting from graphwalker import reporting from graphwalker import execution from graphwalker import graph class TestInteraction(unittest.TestCase): def setUp(self): here = os.path.normpath(os.path.join(__file__, '..', '..')) self.argl = ['python', os.path.join(here, 'cli.py')] self._old_pythonpath = os.environ.get('PYTHONPATH', None) os.environ['PYTHONPATH'] = '.' def tearDown(self): old = getattr(self, '_old_pythonpath', None) if old is not None: os.environ['PYTHONPATH'] = old else: del os.environ['PYTHONPATH'] def test_cli_models_only(self): ex = "graphwalker/test/examples/%s.tgf" argl = self.argl + [ex % w for w in ("first", "second", "third")] self.assertEqual(subprocess.call(argl), 0) def test_cli_models_with_actor(self): ex = "graphwalker/test/examples/%s.tgf" argl = self.argl + [ex % w for w in ("first", "second", "third")] argl = argl + ['graphwalker.dummy.Mute'] self.assertEqual(subprocess.call(argl), 0) def test_by_interaction(self): r"""Interaction self-test. For comparison, try this: PYTHONPATH=. python graphwalker/cli.py --reporter=Print \ graphwalker/test/examples/selftest.graphml \ graphwalker.test.interactor.Interactor """ outer = self class HijackReporter(reporting.ReportingPlugin): def finalize(self, failure=False): outer.assertFalse(failure) reporter = HijackReporter() plan = planning.build(['Random']) stop = halting.build('Coverage') model = graph.Graph.read('graphwalker/test/examples/selftest.graphml') actor = 'graphwalker.test.interactor.Interactor' exe = execution.Executor(actor, reporter) context = { 'plan': plan, 'stop': stop, 'actor': actor, 'reporter': reporter, 'executor': exe, 'model': model} stop.start(model, context) path = plan(model, stop, 'Start', context) exe.run('inner', path, context)
[]
[]
[ "PYTHONPATH" ]
[]
["PYTHONPATH"]
python
1
0
hosted.go
package servermanager import ( "os" ) var ( IsHosted = os.Getenv("HOSTED") == "true" MaxClientsOverride = formValueAsInt(os.Getenv("MAX_CLIENTS_OVERRIDE")) IsPremium = "true" ) func Premium() bool { return IsPremium == "true" }
[ "\"HOSTED\"", "\"MAX_CLIENTS_OVERRIDE\"" ]
[]
[ "HOSTED", "MAX_CLIENTS_OVERRIDE" ]
[]
["HOSTED", "MAX_CLIENTS_OVERRIDE"]
go
2
0
meiduo/celery_tasks/main.py
from celery import Celery import os # Instantiate the object if not os.getenv('DJANGO_SETTINGS_MODULE'): # if this environment variable is not set # os.environ = {'DJANGO_SETTINGS_MODULE': 'meiduo_mall.settings.dev'} os.environ['DJANGO_SETTINGS_MODULE'] = 'meiduo.settings.dev' celery_app = Celery() # Config path celery_app.config_from_object('celery_tasks.config') # Task paths celery_app.autodiscover_tasks(['celery_tasks.sms','celery_tasks.email'])
[]
[]
[ "DJANGO_SETTINGS_MODULE" ]
[]
["DJANGO_SETTINGS_MODULE"]
python
1
0
src/cmd/compile/internal/ssa/stmtlines_test.go
package ssa_test import ( cmddwarf "cmd/internal/dwarf" "cmd/internal/quoted" "debug/dwarf" "debug/elf" "debug/macho" "debug/pe" "fmt" "internal/testenv" "internal/xcoff" "io" "os" "runtime" "sort" "testing" ) func open(path string) (*dwarf.Data, error) { if fh, err := elf.Open(path); err == nil { return fh.DWARF() } if fh, err := pe.Open(path); err == nil { return fh.DWARF() } if fh, err := macho.Open(path); err == nil { return fh.DWARF() } if fh, err := xcoff.Open(path); err == nil { return fh.DWARF() } return nil, fmt.Errorf("unrecognized executable format") } func must(err error) { if err != nil { panic(err) } } type Line struct { File string Line int } func TestStmtLines(t *testing.T) { if runtime.GOOS == "plan9" { t.Skip("skipping on plan9; no DWARF symbol table in executables") } if runtime.GOOS == "aix" { extld := os.Getenv("CC") if extld == "" { extld = "gcc" } extldArgs, err := quoted.Split(extld) if err != nil { t.Fatal(err) } enabled, err := cmddwarf.IsDWARFEnabledOnAIXLd(extldArgs) if err != nil { t.Fatal(err) } if !enabled { t.Skip("skipping on aix: no DWARF with ld version < 7.2.2 ") } } lines := map[Line]bool{} dw, err := open(testenv.GoToolPath(t)) must(err) rdr := dw.Reader() rdr.Seek(0) for { e, err := rdr.Next() must(err) if e == nil { break } if e.Tag != dwarf.TagCompileUnit { continue } pkgname, _ := e.Val(dwarf.AttrName).(string) if pkgname == "runtime" { continue } if pkgname == "crypto/elliptic/internal/fiat" { continue // golang.org/issue/49372 } if e.Val(dwarf.AttrStmtList) == nil { continue } lrdr, err := dw.LineReader(e) must(err) var le dwarf.LineEntry for { err := lrdr.Next(&le) if err == io.EOF { break } must(err) fl := Line{le.File.Name, le.Line} lines[fl] = lines[fl] || le.IsStmt } } nonStmtLines := []Line{} for line, isstmt := range lines { if !isstmt { nonStmtLines = append(nonStmtLines, line) } } if runtime.GOARCH == "amd64" { if len(nonStmtLines)*100 > len(lines) { // > 99% obtained on amd64, no backsliding t.Errorf("Saw too many (amd64, > 1%%) lines without statement marks, total=%d, nostmt=%d ('-run TestStmtLines -v' lists failing lines)\n", len(lines), len(nonStmtLines)) } } else if len(nonStmtLines)*100 > 2*len(lines) { // expect 98% elsewhere. t.Errorf("Saw too many (not amd64, > 2%%) lines without statement marks, total=%d, nostmt=%d ('-run TestStmtLines -v' lists failing lines)\n", len(lines), len(nonStmtLines)) } t.Logf("Saw %d out of %d lines without statement marks", len(nonStmtLines), len(lines)) if testing.Verbose() { sort.Slice(nonStmtLines, func(i, j int) bool { if nonStmtLines[i].File != nonStmtLines[j].File { return nonStmtLines[i].File < nonStmtLines[j].File } return nonStmtLines[i].Line < nonStmtLines[j].Line }) for _, l := range nonStmtLines { t.Logf("%s:%d has no DWARF is_stmt mark\n", l.File, l.Line) } } t.Logf("total=%d, nostmt=%d\n", len(lines), len(nonStmtLines)) }
[ "\"CC\"" ]
[]
[ "CC" ]
[]
["CC"]
go
1
0
cmd/dockerd/options.go
package dockerd import ( "fmt" "os" "path/filepath" "github.com/Sirupsen/logrus" cliconfig "github.com/docker/docker/cli/config" "github.com/docker/docker/daemon/config" "github.com/docker/docker/opts" "github.com/docker/go-connections/tlsconfig" "github.com/spf13/pflag" ) const ( // DefaultCaFile is the default filename for the CA pem file DefaultCaFile = "ca.pem" // DefaultKeyFile is the default filename for the key pem file DefaultKeyFile = "key.pem" // DefaultCertFile is the default filename for the cert pem file DefaultCertFile = "cert.pem" // FlagTLSVerify is the flag name for the TLS verification option FlagTLSVerify = "tlsverify" ) var ( dockerCertPath = os.Getenv("DOCKER_CERT_PATH") dockerTLSVerify = os.Getenv("DOCKER_TLS_VERIFY") != "" ) type daemonOptions struct { version bool configFile string daemonConfig *config.Config flags *pflag.FlagSet Debug bool Hosts []string LogLevel string TLS bool TLSVerify bool TLSOptions *tlsconfig.Options } // newDaemonOptions returns a new daemonFlags func newDaemonOptions(config *config.Config) *daemonOptions { return &daemonOptions{ daemonConfig: config, } } // InstallFlags adds flags for the common options on the FlagSet func (o *daemonOptions) InstallFlags(flags *pflag.FlagSet) { if dockerCertPath == "" { dockerCertPath = cliconfig.Dir() } flags.BoolVarP(&o.Debug, "debug", "D", false, "Enable debug mode") flags.StringVarP(&o.LogLevel, "log-level", "l", "info", `Set the logging level ("debug"|"info"|"warn"|"error"|"fatal")`) flags.BoolVar(&o.TLS, "tls", false, "Use TLS; implied by --tlsverify") flags.BoolVar(&o.TLSVerify, FlagTLSVerify, dockerTLSVerify, "Use TLS and verify the remote") // TODO use flag flags.String("identity"}, "i", "", "Path to libtrust key file") o.TLSOptions = &tlsconfig.Options{ CAFile: filepath.Join(dockerCertPath, DefaultCaFile), CertFile: filepath.Join(dockerCertPath, DefaultCertFile), KeyFile: filepath.Join(dockerCertPath, DefaultKeyFile), } tlsOptions := o.TLSOptions flags.Var(opts.NewQuotedString(&tlsOptions.CAFile), "tlscacert", "Trust certs signed only by this CA") flags.Var(opts.NewQuotedString(&tlsOptions.CertFile), "tlscert", "Path to TLS certificate file") flags.Var(opts.NewQuotedString(&tlsOptions.KeyFile), "tlskey", "Path to TLS key file") hostOpt := opts.NewNamedListOptsRef("hosts", &o.Hosts, opts.ValidateHost) flags.VarP(hostOpt, "host", "H", "Daemon socket(s) to connect to") } // SetDefaultOptions sets default values for options after flag parsing is // complete func (o *daemonOptions) SetDefaultOptions(flags *pflag.FlagSet) { // Regardless of whether the user sets it to true or false, if they // specify --tlsverify at all then we need to turn on TLS // TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need // to check that here as well if flags.Changed(FlagTLSVerify) || o.TLSVerify { o.TLS = true } if !o.TLS { o.TLSOptions = nil } else { tlsOptions := o.TLSOptions tlsOptions.InsecureSkipVerify = !o.TLSVerify // Reset CertFile and KeyFile to empty string if the user did not specify // the respective flags and the respective default files were not found. 
if !flags.Changed("tlscert") { if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) { tlsOptions.CertFile = "" } } if !flags.Changed("tlskey") { if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) { tlsOptions.KeyFile = "" } } } } // setLogLevel sets the logrus logging level func setLogLevel(logLevel string) { if logLevel != "" { lvl, err := logrus.ParseLevel(logLevel) if err != nil { fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", logLevel) os.Exit(1) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } }
[ "\"DOCKER_CERT_PATH\"", "\"DOCKER_TLS_VERIFY\"" ]
[]
[ "DOCKER_CERT_PATH", "DOCKER_TLS_VERIFY" ]
[]
["DOCKER_CERT_PATH", "DOCKER_TLS_VERIFY"]
go
2
0
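The record above resolves DOCKER_CERT_PATH with a configuration-directory fallback, treats any non-empty DOCKER_TLS_VERIFY as "verify", and derives ca.pem, cert.pem, and key.pem from the chosen directory. A small Python sketch of the same resolution logic, assuming a hypothetical DEFAULT_CONFIG_DIR in place of cliconfig.Dir():

import os

# Stand-in for cliconfig.Dir() in the record above (assumption).
DEFAULT_CONFIG_DIR = os.path.expanduser("~/.docker")

cert_path = os.environ.get("DOCKER_CERT_PATH") or DEFAULT_CONFIG_DIR
# Convention used in the record: any non-empty value enables verification.
tls_verify = os.environ.get("DOCKER_TLS_VERIFY", "") != ""

tls_options = {
    "ca_file": os.path.join(cert_path, "ca.pem"),
    "cert_file": os.path.join(cert_path, "cert.pem"),
    "key_file": os.path.join(cert_path, "key.pem"),
    "verify": tls_verify,
}
print(tls_options)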
test-case/install/build/catkin_generated/installspace/_setup_util.py
#!/usr/bin/python # -*- coding: utf-8 -*- # Software License Agreement (BSD License) # # Copyright (c) 2012, Willow Garage, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Willow Garage, Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. '''This file generates shell code for the setup.SHELL scripts to set environment variables''' from __future__ import print_function import argparse import copy import errno import os import platform import sys CATKIN_MARKER_FILE = '.catkin' system = platform.system() IS_DARWIN = (system == 'Darwin') IS_WINDOWS = (system == 'Windows') # subfolder of workspace prepended to CMAKE_PREFIX_PATH ENV_VAR_SUBFOLDERS = { 'CMAKE_PREFIX_PATH': '', 'CPATH': 'include', 'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')], 'PATH': 'bin', 'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')], 'PYTHONPATH': 'lib/python2.7/dist-packages', } def rollback_env_variables(environ, env_var_subfolders): ''' Generate shell code to reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH. This does not cover modifications performed by environment hooks. ''' lines = [] unmodified_environ = copy.copy(environ) for key in sorted(env_var_subfolders.keys()): subfolders = env_var_subfolders[key] if not isinstance(subfolders, list): subfolders = [subfolders] value = _rollback_env_variable(unmodified_environ, key, subfolders) if value is not None: environ[key] = value lines.append(assignment(key, value)) if lines: lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH')) return lines def _rollback_env_variable(environ, name, subfolders): ''' For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder. :param subfolders: list of str '' or subfoldername that may start with '/' :returns: the updated value of the environment variable. 
''' value = environ[name] if name in environ else '' env_paths = [path for path in value.split(os.pathsep) if path] value_modified = False for subfolder in subfolders: if subfolder: if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)): subfolder = subfolder[1:] if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)): subfolder = subfolder[:-1] for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True): path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path path_to_remove = None for env_path in env_paths: env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path if env_path_clean == path_to_find: path_to_remove = env_path break if path_to_remove: env_paths.remove(path_to_remove) value_modified = True new_value = os.pathsep.join(env_paths) return new_value if value_modified else None def _get_workspaces(environ, include_fuerte=False, include_non_existing=False): ''' Based on CMAKE_PREFIX_PATH return all catkin workspaces. :param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool`` ''' # get all cmake prefix paths env_name = 'CMAKE_PREFIX_PATH' value = environ[env_name] if env_name in environ else '' paths = [path for path in value.split(os.pathsep) if path] # remove non-workspace paths workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))] return workspaces def prepend_env_variables(environ, env_var_subfolders, workspaces): ''' Generate shell code to prepend environment variables for the all workspaces. ''' lines = [] lines.append(comment('prepend folders of workspaces to environment variables')) paths = [path for path in workspaces.split(os.pathsep) if path] prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '') lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix)) for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']): subfolder = env_var_subfolders[key] prefix = _prefix_env_variable(environ, key, paths, subfolder) lines.append(prepend(environ, key, prefix)) return lines def _prefix_env_variable(environ, name, paths, subfolders): ''' Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items. 
''' value = environ[name] if name in environ else '' environ_paths = [path for path in value.split(os.pathsep) if path] checked_paths = [] for path in paths: if not isinstance(subfolders, list): subfolders = [subfolders] for subfolder in subfolders: path_tmp = path if subfolder: path_tmp = os.path.join(path_tmp, subfolder) # skip nonexistent paths if not os.path.exists(path_tmp): continue # exclude any path already in env and any path we already added if path_tmp not in environ_paths and path_tmp not in checked_paths: checked_paths.append(path_tmp) prefix_str = os.pathsep.join(checked_paths) if prefix_str != '' and environ_paths: prefix_str += os.pathsep return prefix_str def assignment(key, value): if not IS_WINDOWS: return 'export %s="%s"' % (key, value) else: return 'set %s=%s' % (key, value) def comment(msg): if not IS_WINDOWS: return '# %s' % msg else: return 'REM %s' % msg def prepend(environ, key, prefix): if key not in environ or not environ[key]: return assignment(key, prefix) if not IS_WINDOWS: return 'export %s="%s$%s"' % (key, prefix, key) else: return 'set %s=%s%%%s%%' % (key, prefix, key) def find_env_hooks(environ, cmake_prefix_path): ''' Generate shell code with found environment hooks for the all workspaces. ''' lines = [] lines.append(comment('found environment hooks in workspaces')) generic_env_hooks = [] generic_env_hooks_workspace = [] specific_env_hooks = [] specific_env_hooks_workspace = [] generic_env_hooks_by_filename = {} specific_env_hooks_by_filename = {} generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh' specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None # remove non-workspace paths workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))] for workspace in reversed(workspaces): env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d') if os.path.isdir(env_hook_dir): for filename in sorted(os.listdir(env_hook_dir)): if filename.endswith('.%s' % generic_env_hook_ext): # remove previous env hook with same name if present if filename in generic_env_hooks_by_filename: i = generic_env_hooks.index(generic_env_hooks_by_filename[filename]) generic_env_hooks.pop(i) generic_env_hooks_workspace.pop(i) # append env hook generic_env_hooks.append(os.path.join(env_hook_dir, filename)) generic_env_hooks_workspace.append(workspace) generic_env_hooks_by_filename[filename] = generic_env_hooks[-1] elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext): # remove previous env hook with same name if present if filename in specific_env_hooks_by_filename: i = specific_env_hooks.index(specific_env_hooks_by_filename[filename]) specific_env_hooks.pop(i) specific_env_hooks_workspace.pop(i) # append env hook specific_env_hooks.append(os.path.join(env_hook_dir, filename)) specific_env_hooks_workspace.append(workspace) specific_env_hooks_by_filename[filename] = specific_env_hooks[-1] env_hooks = generic_env_hooks + specific_env_hooks env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace count = len(env_hooks) lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count)) for i in range(count): lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i])) lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i])) return lines def _parse_arguments(args=None): parser = 
argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.') parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context') return parser.parse_known_args(args=args)[0] if __name__ == '__main__': try: try: args = _parse_arguments() except Exception as e: print(e, file=sys.stderr) sys.exit(1) # environment at generation time CMAKE_PREFIX_PATH = '/home/george/catkin_ws/devel;/opt/ros/indigo'.split(';') # prepend current workspace if not already part of CPP base_path = os.path.dirname(__file__) if base_path not in CMAKE_PREFIX_PATH: CMAKE_PREFIX_PATH.insert(0, base_path) CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH) environ = dict(os.environ) lines = [] if not args.extend: lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS) lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH) lines += find_env_hooks(environ, CMAKE_PREFIX_PATH) print('\n'.join(lines)) # need to explicitly flush the output sys.stdout.flush() except IOError as e: # and catch potential "broken pipe" if stdout is not writable # which can happen when piping the output to a file but the disk is full if e.errno == errno.EPIPE: print(e, file=sys.stderr) sys.exit(2) raise sys.exit(0)
[]
[]
[]
[]
[]
python
0
0
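The docstrings in the record above describe prepending workspace subfolders to environment variables without creating duplicate or empty entries. A compact Python sketch of that idea; prepend_unique is an illustrative name, not a function from the record:

import os

def prepend_unique(name, new_paths, environ=os.environ):
    # Put new_paths in front of environ[name], skipping entries that are
    # already present, joined with the platform path separator.
    existing = [p for p in environ.get(name, "").split(os.pathsep) if p]
    prefix = [p for p in new_paths if p and p not in existing]
    return os.pathsep.join(prefix + existing)

# Example: prepend a workspace lib directory to LD_LIBRARY_PATH.
print(prepend_unique("LD_LIBRARY_PATH", ["/opt/ros/indigo/lib"]))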
examples/pwr_run/checkpointing/nonpc_short/timed_pwr/job36.py
""" #Trains a ResNet on the CIFAR10 dataset. """ from __future__ import print_function import keras from keras.layers import Dense, Conv2D, BatchNormalization, Activation from keras.layers import AveragePooling2D, Input, Flatten from keras.optimizers import Adam from keras.callbacks import ModelCheckpoint, LearningRateScheduler from keras.callbacks import ReduceLROnPlateau, TensorBoard from keras.preprocessing.image import ImageDataGenerator from keras.regularizers import l2 from keras import backend as K from keras.models import Model from keras.datasets import cifar10 from keras.applications.vgg16 import VGG16 from keras.applications.vgg19 import VGG19 from keras import models, layers, optimizers from datetime import datetime import tensorflow as tf import numpy as np import os import pdb import sys import argparse import time import signal import glob import json import send_signal parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training') parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name') parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint') parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use') parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)') parser.set_defaults(resume=False) args = parser.parse_args() os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num # Training parameters batch_size = 32 args_lr = 0.0023 args_model = 'vgg19' epoch_begin_time = 0 job_name = sys.argv[0].split('.')[0] save_files = '/scratch/li.baol/checkpoint_max_pwr/' + job_name + '*' total_epochs = 78 starting_epoch = 0 # first step is to update the PID pid_dict = {} with open('pid_lock.json', 'r') as fp: pid_dict = json.load(fp) pid_dict[job_name] = os.getpid() json_file = json.dumps(pid_dict) with open('pid_lock.json', 'w') as fp: fp.write(json_file) os.rename('pid_lock.json', 'pid.json') if args.resume: save_file = glob.glob(save_files)[0] # epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0]) starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1]) data_augmentation = True num_classes = 10 # Subtracting pixel mean improves accuracy subtract_pixel_mean = True n = 3 # Model name, depth and version model_type = args.tc #'P100_resnet50_he_256_1' # Load the CIFAR10 data. (x_train, y_train), (x_test, y_test) = cifar10.load_data() # Normalize data. x_train = x_train.astype('float32') / 255 x_test = x_test.astype('float32') / 255 # If subtract pixel mean is enabled if subtract_pixel_mean: x_train_mean = np.mean(x_train, axis=0) x_train -= x_train_mean x_test -= x_train_mean print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') print('y_train shape:', y_train.shape) # Convert class vectors to binary class matrices. 
y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) if args.resume: print('resume from checkpoint') model = keras.models.load_model(save_file) else: print('train from start') model = models.Sequential() if '16' in args_model: base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None) elif '19' in args_model: base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None) #base_model.summary() #pdb.set_trace() model.add(base_model) model.add(layers.Flatten()) model.add(layers.BatchNormalization()) model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform')) #model.add(layers.Dropout(0.2)) model.add(layers.BatchNormalization()) model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform')) #model.add(layers.Dropout(0.2)) model.add(layers.BatchNormalization()) model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform')) model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=args_lr), metrics=['accuracy']) #model.summary() print(model_type) #pdb.set_trace() current_epoch = 0 ################### connects interrupt signal to the process ##################### def terminateProcess(signalNumber, frame): # first record the wasted epoch time global epoch_begin_time if epoch_begin_time == 0: epoch_waste_time = 0 else: epoch_waste_time = int(time.time() - epoch_begin_time) epoch_waste_dict = {} with open('epoch_waste.json', 'r') as fp: epoch_waste_dict = json.load(fp) epoch_waste_dict[job_name] += epoch_waste_time json_file3 = json.dumps(epoch_waste_dict) with open('epoch_waste.json', 'w') as fp: fp.write(json_file3) print('checkpointing the model triggered by kill -15 signal') # delete whatever checkpoint that already exists for f in glob.glob(save_files): os.remove(f) model.save('/scratch/li.baol/checkpoint_max_pwr/' + job_name + '_' + str(current_epoch) + '.h5') print ('(SIGTERM) terminating the process') checkpoint_dict = {} with open('checkpoint.json', 'r') as fp: checkpoint_dict = json.load(fp) checkpoint_dict[job_name] = 1 json_file3 = json.dumps(checkpoint_dict) with open('checkpoint.json', 'w') as fp: fp.write(json_file3) sys.exit() signal.signal(signal.SIGTERM, terminateProcess) ################################################################################# logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch') class PrintEpoch(keras.callbacks.Callback): def on_epoch_begin(self, epoch, logs=None): global current_epoch #remaining_epochs = epochs - epoch current_epoch = epoch print('current epoch ' + str(current_epoch)) global epoch_begin_time epoch_begin_time = time.time() def on_epoch_end(self, epoch, logs=None): # send message of epoch end message = job_name + ' epoch_end' send_signal.send(args.node, 10002, message) my_callback = PrintEpoch() callbacks = [tensorboard_callback, my_callback] #[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback] # Run training # send signal to indicate checkpoint is qualified message = job_name + ' ckpt_qual' send_signal.send(args.node, 10002, message) model.fit(x_train, y_train, batch_size=batch_size, epochs=round(total_epochs/2), validation_data=(x_test, y_test), shuffle=True, callbacks=callbacks, initial_epoch=starting_epoch, verbose=1 ) # Score trained model. 
scores = model.evaluate(x_test, y_test, verbose=1) print('Test loss:', scores[0]) print('Test accuracy:', scores[1]) # send signal to indicate job has finished message = job_name + ' finish' send_signal.send(args.node, 10002, message)
[]
[]
[ "CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
python
2
0
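The record above pins the training job to one GPU by exporting CUDA_DEVICE_ORDER and CUDA_VISIBLE_DEVICES before any CUDA work starts. A minimal Python sketch of that pinning step; select_gpu is an illustrative helper, not part of the record:

import os

def select_gpu(gpu_num):
    # Pin this process to a single GPU; both variables must be set before
    # the framework initializes CUDA.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_num)

select_gpu(0)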
main.go
package main import ( "fmt" "github.com/aws/aws-lambda-go/lambda" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/pkg/errors" "github.com/silinternational/awsops/lib" "os" "strings" ) type EcsRightSizeClusterConfig struct { ClusterNamesCSV string Region string } func main() { lambda.Start(handler) } func handler(config EcsRightSizeClusterConfig) error { if config.Region == "" { config.Region = "us-east-1" } atLeastServiceDesiredCount := false atLeastServiceDesiredCountEnv := os.Getenv("atLeastServiceDesiredCount") if atLeastServiceDesiredCountEnv == "true" { atLeastServiceDesiredCount = true } clusters := strings.Split(config.ClusterNamesCSV, ",") if len(clusters) == 0 { err := errors.New("error: EcsRightSizeConfig.ClusterNamesCSV is empty") fmt.Println(err.Error()) return err } AwsSess := session.Must(session.NewSession(&aws.Config{ Region: aws.String(config.Region), })) for _, cluster := range clusters { err := lib.RightSizeAsgForEcsCluster(AwsSess, cluster, atLeastServiceDesiredCount) if err != nil { fmt.Println(err.Error()) return err } } return nil }
[ "\"atLeastServiceDesiredCount\"" ]
[]
[ "atLeastServiceDesiredCount" ]
[]
["atLeastServiceDesiredCount"]
go
1
0
cmd/mugambo/launcher/misccmd.go
package launcher

import (
	"fmt"
	"os"
	"runtime"
	"strings"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/params"
	"gopkg.in/urfave/cli.v1"

	"github.com/topcoder1208/fantom-fork/gossip"
)

var (
	versionCommand = cli.Command{
		Action:    utils.MigrateFlags(version),
		Name:      "version",
		Usage:     "Print version numbers",
		ArgsUsage: " ",
		Category:  "MISCELLANEOUS COMMANDS",
		Description: `
The output of this command is supposed to be machine-readable.
`,
	}
	licenseCommand = cli.Command{
		Action:    utils.MigrateFlags(license),
		Name:      "license",
		Usage:     "Display license information",
		ArgsUsage: " ",
		Category:  "MISCELLANEOUS COMMANDS",
	}
)

func version(ctx *cli.Context) error {
	fmt.Println(strings.Title(clientIdentifier))
	fmt.Println("Version:", params.VersionWithMeta())
	if gitCommit != "" {
		fmt.Println("Git Commit:", gitCommit)
	}
	if gitDate != "" {
		fmt.Println("Git Commit Date:", gitDate)
	}
	fmt.Println("Architecture:", runtime.GOARCH)
	fmt.Println("Protocol Versions:", []uint{gossip.ProtocolVersion})
	fmt.Println("Go Version:", runtime.Version())
	fmt.Println("Operating System:", runtime.GOOS)
	fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH"))
	fmt.Printf("GOROOT=%s\n", runtime.GOROOT())
	return nil
}

func license(_ *cli.Context) error {
	// TODO: license text
	fmt.Println(``)
	return nil
}
[ "\"GOPATH\"" ]
[]
[ "GOPATH" ]
[]
["GOPATH"]
go
1
0
vetcalc/wsgi.py
""" WSGI config for vetcalc project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "vetcalc.settings") application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
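The record above relies on os.environ.setdefault, which only writes the default when the variable is missing, so a DJANGO_SETTINGS_MODULE exported by the caller's environment always wins. A tiny Python sketch showing that behaviour with the same variable name:

import os

# Only fills in the value when the variable is absent from the environment.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "vetcalc.settings")
print(os.environ["DJANGO_SETTINGS_MODULE"])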
conans/model/conan_file.py
import os from contextlib import contextmanager from conans import tools # @UnusedImport KEEP THIS! Needed for pyinstaller to copy to exe. from conans.client.tools.env import pythonpath from conans.errors import ConanException from conans.model.build_info import DepsCppInfo from conans.model.env_info import DepsEnvInfo, EnvValues from conans.model.options import Options, PackageOptions, OptionsValues from conans.model.requires import Requirements from conans.model.user_info import DepsUserInfo from conans.paths import RUN_LOG_NAME from conans.tools import environment_append, no_op from conans.client.output import Color def create_options(conanfile): try: package_options = PackageOptions(getattr(conanfile, "options", None)) options = Options(package_options) default_options = getattr(conanfile, "default_options", None) if default_options: if isinstance(default_options, (list, tuple)): default_values = OptionsValues(default_options) elif isinstance(default_options, str): default_values = OptionsValues.loads(default_options) else: raise ConanException("Please define your default_options as list or " "multiline string") options.values = default_values return options except Exception as e: raise ConanException("Error while initializing options. %s" % str(e)) def create_requirements(conanfile): try: # Actual requirements of this package if not hasattr(conanfile, "requires"): return Requirements() else: if not conanfile.requires: return Requirements() if isinstance(conanfile.requires, tuple): return Requirements(*conanfile.requires) else: return Requirements(conanfile.requires, ) except Exception as e: raise ConanException("Error while initializing requirements. %s" % str(e)) def create_settings(conanfile, settings, local): try: defined_settings = getattr(conanfile, "settings", None) if isinstance(defined_settings, str): defined_settings = [defined_settings] current = defined_settings or {} settings.constraint(current, raise_undefined_field=not local) return settings except Exception as e: raise ConanException("Error while initializing settings. 
%s" % str(e)) def create_exports(conanfile): if not hasattr(conanfile, "exports"): return None else: if isinstance(conanfile.exports, str): return (conanfile.exports, ) return conanfile.exports def create_exports_sources(conanfile): if not hasattr(conanfile, "exports_sources"): return None else: if isinstance(conanfile.exports_sources, str): return (conanfile.exports_sources, ) return conanfile.exports_sources @contextmanager def _env_and_python(conanfile): with environment_append(conanfile.env): with pythonpath(conanfile): yield def get_env_context_manager(conanfile, without_python=False): if not conanfile.apply_env: return no_op() if without_python: return environment_append(conanfile.env) return _env_and_python(conanfile) class ConanFile(object): """ The base class for all package recipes """ name = None version = None # Any str, can be "1.1" or whatever url = None # The URL where this File is located, as github, to collaborate in package # The license of the PACKAGE, just a shortcut, does not replace or # change the actual license of the source code license = None author = None # Main maintainer/responsible for the package, any format build_policy = None short_paths = False apply_env = True # Apply environment variables from requires deps_env_info and profiles def __init__(self, output, runner, settings, user=None, channel=None, local=None): # User defined generators self.generators = self.generators if hasattr(self, "generators") else ["txt"] if isinstance(self.generators, str): self.generators = [self.generators] # User defined options self.options = create_options(self) self.requires = create_requirements(self) self.settings = create_settings(self, settings, local) try: if self.settings.os_build and self.settings.os: output.writeln("*"*60, front=Color.BRIGHT_RED) output.writeln(" This package defines both 'os' and 'os_build' ", front=Color.BRIGHT_RED) output.writeln(" Please use 'os' for libraries and 'os_build'", front=Color.BRIGHT_RED) output.writeln(" only for build-requires used for cross-building", front=Color.BRIGHT_RED) output.writeln("*"*60, front=Color.BRIGHT_RED) except ConanException: pass self.exports = create_exports(self) self.exports_sources = create_exports_sources(self) # needed variables to pack the project self.cpp_info = None # Will be initialized at processing time self.deps_cpp_info = DepsCppInfo() # environment variables declared in the package_info self.env_info = None # Will be initialized at processing time self.deps_env_info = DepsEnvInfo() # user declared variables self.user_info = None # Keys are the package names, and the values a dict with the vars self.deps_user_info = DepsUserInfo() self.copy = None # initialized at runtime self.copy_deps = None # initialized at runtime # an output stream (writeln, info, warn error) self.output = output # something that can run commands, as os.sytem self._runner = runner self.develop = False # user specified env variables self._env_values = EnvValues() # Updated at runtime, user specified -e self._user = user self._channel = channel self.in_local_cache = False self.description = None # Vars to control the build steps (build(), package()) self.should_configure = True self.should_build = True self.should_install = True @property def env(self): """Apply the self.deps_env_info into a copy of self._env_values (will prioritize the self._env_values, user specified from profiles or -e first, then inherited)""" # Cannot be lazy cached, because it's called in configure node, and we still don't have # the deps_env_info objects 
available tmp_env_values = self._env_values.copy() tmp_env_values.update(self.deps_env_info) ret, multiple = tmp_env_values.env_dicts(self.name) ret.update(multiple) return ret @property def channel(self): if not self._channel: self._channel = os.getenv("CONAN_CHANNEL") if not self._channel: raise ConanException("CONAN_CHANNEL environment variable not defined, " "but self.channel is used in conanfile") return self._channel @property def user(self): if not self._user: self._user = os.getenv("CONAN_USERNAME") if not self._user: raise ConanException("CONAN_USERNAME environment variable not defined, " "but self.user is used in conanfile") return self._user def collect_libs(self, folder="lib"): self.output.warn("Use 'self.collect_libs' is deprecated, " "use tools.collect_libs(self) instead") return tools.collect_libs(self, folder=folder) @property def build_policy_missing(self): return self.build_policy == "missing" @property def build_policy_always(self): return self.build_policy == "always" def source(self): pass def system_requirements(self): """ this method can be overwritten to implement logic for system package managers, as apt-get You can define self.global_system_requirements = True, if you want the installation to be for all packages (not depending on settings/options/requirements) """ def config_options(self): """ modify options, probably conditioned to some settings. This call is executed before config_settings. E.g. if self.settings.os == "Windows": del self.options.shared # shared/static not supported in win """ def configure(self): """ modify settings, probably conditioned to some options. This call is executed after config_options. E.g. if self.options.header_only: self.settings.clear() This is also the place for conditional requirements """ def build(self): """ build your project calling the desired build tools as done in the command line. E.g. self.run("cmake --build .") Or use the provided build helpers. E.g. cmake.build() """ self.output.warn("This conanfile has no build step") def package(self): """ package the needed files from source and build folders. E.g. self.copy("*.h", src="src/includes", dst="includes") """ self.output.warn("This conanfile has no package step") def package_info(self): """ define cpp_build_info, flags, etc """ def run(self, command, output=True, cwd=None, win_bash=False, subsystem=None, msys_mingw=True): if not win_bash: retcode = self._runner(command, output, os.path.abspath(RUN_LOG_NAME), cwd) else: retcode = tools.run_in_windows_bash(self, bashcmd=command, cwd=cwd, subsystem=subsystem, msys_mingw=msys_mingw) if retcode != 0: raise ConanException("Error %d while executing %s" % (retcode, command)) return retcode def package_id(self): """ modify the conans info, typically to narrow values eg.: conaninfo.package_references = [] """ def test(self): """ test the generated executable. E.g. self.run("./example") """ raise ConanException("You need to create a method 'test' in your test/conanfile.py") def __repr__(self): if self.name and self.version and self._channel and self._user: return "%s/%s@%s/%s" % (self.name, self.version, self.user, self.channel) elif self.name and self.version: return "%s/%s@PROJECT" % (self.name, self.version) else: return "PROJECT"
[]
[]
[ "CONAN_USERNAME", "CONAN_CHANNEL" ]
[]
["CONAN_USERNAME", "CONAN_CHANNEL"]
python
2
0
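The channel and user properties in the record above lazily fall back to CONAN_CHANNEL and CONAN_USERNAME and fail loudly when no value is available. A toy Python sketch of that pattern; RecipeIdentity and RuntimeError are stand-ins to keep it dependency-free, not Conan API:

import os

class RecipeIdentity(object):
    # Toy stand-in for the channel property in the record above.
    def __init__(self, channel=None):
        self._channel = channel

    @property
    def channel(self):
        if not self._channel:
            self._channel = os.getenv("CONAN_CHANNEL")
            if not self._channel:
                raise RuntimeError("CONAN_CHANNEL environment variable not defined")
        return self._channel

print(RecipeIdentity(channel="stable").channel)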
main.go
package main import ( "bytes" "html/template" "log" "math" "net/http" "net/url" "os" "strconv" "time" "github.com/joho/godotenv" "github.com/sam-hyde/go-news-web-app/news" ) var tpl = template.Must(template.ParseFiles("index.html")) // Search is a struct type Search struct { Query string NextPage int TotalPages int Results *news.Results } func (s *Search) IsLastPage() bool { return s.NextPage >= s.TotalPages } func (s *Search) CurrentPage() int { if s.NextPage == 1 { return s.NextPage } return s.NextPage - 1 } func (s *Search) PreviousPage() int { return s.CurrentPage() - 1 } func indexHandler(w http.ResponseWriter, r *http.Request) { buf := &bytes.Buffer{} err := tpl.Execute(buf, nil) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } buf.WriteTo(w) } func searchHandler(newsapi *news.Client) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { u, err := url.Parse(r.URL.String()) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } params := u.Query() searchQuery := params.Get("q") page := params.Get("page") if page == "" { page = "1" } results, err := newsapi.FetchEverything(searchQuery, page) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } nextPage, err := strconv.Atoi(page) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } search := &Search{ Query: searchQuery, NextPage: nextPage, TotalPages: int(math.Ceil(float64(results.TotalResults) / float64(newsapi.PageSize))), Results: results, } if ok := !search.IsLastPage(); ok { search.NextPage++ } buf := &bytes.Buffer{} err = tpl.Execute(buf, search) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } buf.WriteTo(w) } } func main() { err := godotenv.Load() if err != nil { log.Println("Error loading .env file!") } port := os.Getenv("PORT") if port == "" { port = "3000" } apiKey := os.Getenv("NEWS_API_KEY") if apiKey == "" { log.Fatal("Env: NEWS_API_KEY must be set!") } myClient := &http.Client{Timeout: 10 * time.Second} newsapi := news.NewClient(myClient, apiKey, 20) fs := http.FileServer(http.Dir("assets")) mux := http.NewServeMux() mux.Handle("/assets/", http.StripPrefix("/assets/", fs)) mux.HandleFunc("/search", searchHandler(newsapi)) mux.HandleFunc("/", indexHandler) http.ListenAndServe(":"+port, mux) }
[ "\"PORT\"", "\"NEWS_API_KEY\"" ]
[]
[ "PORT", "NEWS_API_KEY" ]
[]
["PORT", "NEWS_API_KEY"]
go
2
0
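The record above combines an optional variable with a default (PORT) and a required one that aborts the program when missing (NEWS_API_KEY). A short Python sketch of the same two-tier check:

import os
import sys

port = os.environ.get("PORT", "3000")
api_key = os.environ.get("NEWS_API_KEY")
if not api_key:
    # Required variable: stop immediately with an explanatory message.
    sys.exit("Env: NEWS_API_KEY must be set!")
print("listening on :" + port)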
share/qt/extract_strings_qt.py
#!/usr/bin/python ''' Extract _("...") strings for translation and convert to Qt stringdefs so that they can be picked up by Qt linguist. ''' from __future__ import division,print_function,unicode_literals from subprocess import Popen, PIPE import glob import operator import os import sys OUT_CPP="qt/pwrbstrings.cpp" EMPTY=['""'] def parse_po(text): """ Parse 'po' format produced by xgettext. Return a list of (msgid,msgstr) tuples. """ messages = [] msgid = [] msgstr = [] in_msgid = False in_msgstr = False for line in text.split('\n'): line = line.rstrip('\r') if line.startswith('msgid '): if in_msgstr: messages.append((msgid, msgstr)) in_msgstr = False # message start in_msgid = True msgid = [line[6:]] elif line.startswith('msgstr '): in_msgid = False in_msgstr = True msgstr = [line[7:]] elif line.startswith('"'): if in_msgid: msgid.append(line) if in_msgstr: msgstr.append(line) if in_msgstr: messages.append((msgid, msgstr)) return messages files = sys.argv[1:] # xgettext -n --keyword=_ $FILES XGETTEXT=os.getenv('XGETTEXT', 'xgettext') if not XGETTEXT: print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr) print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr) exit(1) child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE) (out, err) = child.communicate() messages = parse_po(out.decode('utf-8')) f = open(OUT_CPP, 'w') f.write(""" #include <QtGlobal> // Automatically generated by extract_strings.py #ifdef __GNUC__ #define UNUSED __attribute__((unused)) #else #define UNUSED #endif """) f.write('static const char UNUSED *pwrb_strings[] = {\n') messages.sort(key=operator.itemgetter(0)) for (msgid, msgstr) in messages: if msgid != EMPTY: f.write('QT_TRANSLATE_NOOP("pwrb-core", %s),\n' % ('\n'.join(msgid))) f.write('};\n') f.close()
[]
[]
[ "XGETTEXT" ]
[]
["XGETTEXT"]
python
1
0
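The record above lets XGETTEXT override the extractor binary, defaulting to xgettext, and drives it through Popen with a pipe. A minimal Python sketch of that override-then-invoke pattern; the --version call is only a harmless placeholder and assumes the tool is installed:

import os
from subprocess import PIPE, Popen

xgettext = os.getenv("XGETTEXT", "xgettext")
child = Popen([xgettext, "--version"], stdout=PIPE)
out, _ = child.communicate()
print(out.decode("utf-8", "replace").splitlines()[0])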
src/CNN_classification/train.py
import os import numpy as np os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # for ignoring the some of tf warnings import tensorflow as tf from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard from sklearn.model_selection import train_test_split from src.CNN_classification import network from src.CNN_classification import utils def train(X, y, random_state=42, test_size=0.20, stage_train_dir='.', patience=10, epochs=10, batch_size=None, dropout_rate=0.2, dump_model_summary=True, set_lr_scheduler=True, set_checkpoint=True, set_earlystopping=True, set_tensorboard=False, dump_history=True, save_model=True, **kwargs ): N, L = X.shape[0], X.shape[1] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state, ) #stratify=y K = len(np.unique(y_train)) # design the architecture of model model = network.create_model((L, L, 1), K, dropout_rate=dropout_rate) # compile the model opt = tf.keras.optimizers.Adam(learning_rate=1e-4) model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy']) # Callbacks callbacks = [] if set_lr_scheduler: lr_scheduler = tf.keras.callbacks.ReduceLROnPlateau(factor=0.5, patience=5) callbacks += [lr_scheduler] if set_checkpoint: checkpoint_file = utils.make_path(stage_train_dir, "ckpt-best.h5") checkpoint_cb = ModelCheckpoint(checkpoint_file, save_best_only=True, monitor='val_loss', save_weights_only=False) callbacks += [checkpoint_cb] if set_earlystopping: early_stopping_cb = EarlyStopping(patience=patience, restore_best_weights=True) callbacks += [early_stopping_cb] if set_tensorboard: tensor_board = TensorBoard(log_dir=stage_train_dir) callbacks += [tensor_board] # print model info if dump_model_summary: utils.print_model_summary(model, stage_train_dir) # training the model history = model.fit(X_train, y_train, validation_data=(X_test, y_test), callbacks=callbacks, epochs=epochs, batch_size=batch_size) if save_model: model.save(utils.make_path(stage_train_dir, 'saved-model.h5')) if dump_history: utils.write_numpy_dic_to_json(history.history, utils.make_path(stage_train_dir, 'history.json')) loss_test, accuracy_test = model.evaluate(X_test, y_test, verbose=0) print('loss_test={:.3f}, accuracy_test={:.3f}'.format(loss_test, accuracy_test)) """loaded_model = tf.keras.models.load_model(make_path(stage_train_dir, 'saved-model.h5')) loss_test, accuracy_test = loaded_model.evaluate(X_test, y_test, verbose=0) print('loaded_model_loss_test={:.3f}, loaded_model_accuracy_test={:.3f}'.format(loss_test, accuracy_test))""" return model, history #====================================================================
[]
[]
[ "TF_CPP_MIN_LOG_LEVEL" ]
[]
["TF_CPP_MIN_LOG_LEVEL"]
python
1
0
wemake_python_styleguide/violations/best_practices.py
# -*- coding: utf-8 -*- """ These checks ensure that you follow the best practices. The source for these best practices is hidden inside countless hours we have spent debugging software or reviewing it. How do we find inspiration for new rules? We find some ugly code during code reviews and audits. Then we forbid to use this bad code forever. So, this error will never return to our codebase. .. currentmodule:: wemake_python_styleguide.violations.best_practices Summary ------- .. autosummary:: :nosignatures: WrongMagicCommentViolation WrongDocCommentViolation OveruseOfNoqaCommentViolation OveruseOfNoCoverCommentViolation ComplexDefaultValueViolation LoopVariableDefinitionViolation ContextManagerVariableDefinitionViolation MutableModuleConstantViolation SameElementsInConditionViolation HeterogenousCompareViolation WrongModuleMetadataViolation EmptyModuleViolation InitModuleHasLogicViolation BadMagicModuleFunctionViolation WrongUnpackingViolation DuplicateExceptionViolation YieldInComprehensionViolation NonUniqueItemsInHashViolation BaseExceptionSubclassViolation TryExceptMultipleReturnPathViolation WrongKeywordViolation WrongFunctionCallViolation FutureImportViolation RaiseNotImplementedViolation BaseExceptionViolation BooleanPositionalArgumentViolation LambdaInsideLoopViolation UnreachableCodeViolation StatementHasNoEffectViolation MultipleAssignmentsViolation NestedFunctionViolation NestedClassViolation MagicNumberViolation NestedImportViolation ReassigningVariableToItselfViolation ListMultiplyViolation ProtectedModuleViolation ProtectedAttributeViolation StopIterationInsideGeneratorViolation WrongUnicodeEscapeViolation BlockAndLocalOverlapViolation ControlVarUsedAfterBlockViolation OuterScopeShadowingViolation UnhashableTypeInHashViolation WrongKeywordConditionViolation Best practices -------------- .. autoclass:: WrongMagicCommentViolation .. autoclass:: WrongDocCommentViolation .. autoclass:: OveruseOfNoqaCommentViolation .. autoclass:: OveruseOfNoCoverCommentViolation .. autoclass:: ComplexDefaultValueViolation .. autoclass:: LoopVariableDefinitionViolation .. autoclass:: ContextManagerVariableDefinitionViolation .. autoclass:: MutableModuleConstantViolation .. autoclass:: SameElementsInConditionViolation .. autoclass:: HeterogenousCompareViolation .. autoclass:: WrongModuleMetadataViolation .. autoclass:: EmptyModuleViolation .. autoclass:: InitModuleHasLogicViolation .. autoclass:: BadMagicModuleFunctionViolation .. autoclass:: WrongUnpackingViolation .. autoclass:: DuplicateExceptionViolation .. autoclass:: YieldInComprehensionViolation .. autoclass:: NonUniqueItemsInHashViolation .. autoclass:: BaseExceptionSubclassViolation .. autoclass:: TryExceptMultipleReturnPathViolation .. autoclass:: WrongKeywordViolation .. autoclass:: WrongFunctionCallViolation .. autoclass:: FutureImportViolation .. autoclass:: RaiseNotImplementedViolation .. autoclass:: BaseExceptionViolation .. autoclass:: BooleanPositionalArgumentViolation .. autoclass:: LambdaInsideLoopViolation .. autoclass:: UnreachableCodeViolation .. autoclass:: StatementHasNoEffectViolation .. autoclass:: MultipleAssignmentsViolation .. autoclass:: NestedFunctionViolation .. autoclass:: NestedClassViolation .. autoclass:: MagicNumberViolation .. autoclass:: NestedImportViolation .. autoclass:: ReassigningVariableToItselfViolation .. autoclass:: ListMultiplyViolation .. autoclass:: ProtectedModuleViolation .. autoclass:: ProtectedAttributeViolation .. autoclass:: StopIterationInsideGeneratorViolation .. 
autoclass:: WrongUnicodeEscapeViolation .. autoclass:: BlockAndLocalOverlapViolation .. autoclass:: ControlVarUsedAfterBlockViolation .. autoclass:: OuterScopeShadowingViolation .. autoclass:: UnhashableTypeInHashViolation .. autoclass:: WrongKeywordConditionViolation """ from typing_extensions import final from wemake_python_styleguide.violations.base import ( ASTViolation, SimpleViolation, TokenizeViolation, ) @final class WrongMagicCommentViolation(SimpleViolation): """ Restricts to use several control (or magic) comments. We do not allow to use: 1. ``# noqa`` comment without specified violations 2. ``# type: some_type`` comments to specify a type for ``typed_ast`` This violation is reported at the top of the module, so it cannot be locally ignored. Reasoning: We cover several different use-cases in a single rule. ``# noqa`` comment is restricted because it can hide other violations. ``# type: some_type`` comment is restricted because we can already use type annotations instead. Solution: Use ``# noqa`` comments with specified error types. Use type annotations to specify types. We still allow to use ``# type: ignore`` comment. Since sometimes it is totally required. Example:: # Correct: type = MyClass.get_type() # noqa: A001 coordinate: int = 10 some.int_field = 'text' # type: ignore number: int for number in some_untyped_iterable(): ... # Wrong: type = MyClass.get_type() # noqa coordinate = 10 # type: int .. versionadded:: 0.1.0 """ code = 400 error_template = 'Found wrong magic comment: {0}' @final class WrongDocCommentViolation(TokenizeViolation): """ Forbids to use empty doc comments (``#:``). Reasoning: Doc comments are used to provide a documentation. But supplying empty doc comments breaks this use-case. It is unclear why they can be used with no contents. Solution: Add some documentation to this comment. Or remove it. Empty doc comments are not caught by the default ``pycodestyle`` checks. Example:: # Correct: #: List of allowed names: NAMES_WHITELIST = ['feature', 'bug', 'research'] # Wrong: #: NAMES_WHITELIST = ['feature', 'bug', 'research'] .. versionadded:: 0.1.0 """ code = 401 error_template = 'Found wrong doc comment' @final class OveruseOfNoqaCommentViolation(SimpleViolation): """ Forbids to use too many ``# noqa`` comments. We count it on a per-module basis. We use :str:`wemake_python_styleguide.constants.MAX_NOQA_COMMENTS` as a hard limit. Reasoning: Having too many ``# noqa`` comments make your code less readable and clearly indicates that there's something wrong with it. Solution: Refactor your code to match our style. Or use a config file to switch off some checks. .. versionadded:: 0.7.0 """ error_template = 'Found `noqa` comments overuse: {0}' code = 402 @final class OveruseOfNoCoverCommentViolation(SimpleViolation): """ Forbids to use too many ``# pragma: no cover`` comments. We count it on a per-module basis. We use :str:`wemake_python_styleguide.constants.MAX_NO_COVER_COMMENTS` as a default value. Reasoning: Having too many ``# pragma: no cover`` comments clearly indicates that there's something wrong with it. Moreover, it makes your tests useless, since they do not cover a big partion of your code. Solution: Refactor your code to much the style. Or use a config file to switch off some checks. .. versionadded:: 0.8.0 """ error_template = 'Found `noqa` comments overuse: {0}' code = 403 @final class ComplexDefaultValueViolation(ASTViolation): """ Forbids to use complex defaults. 
Anything that is not a ``ast.Name``, ``ast.Attribute``, ``ast.Str``, ``ast.NameConstant``, ``ast.Tuple``, ``ast.Bytes``, ``ast.Num`` or ``ast.Ellipsis`` should be moved out from defaults. Reasoning: It can be tricky. Nothing stops you from making database calls or http requests in such expressions. It is also not readable for us. Solution: Move the expression out from default value. Example:: # Correct: SHOULD_USE_DOCTEST = 'PYFLAKES_DOCTEST' in os.environ def __init__(self, with_doctest=SHOULD_USE_DOCTEST): # Wrong: def __init__(self, with_doctest='PYFLAKES_DOCTEST' in os.environ): .. versionadded:: 0.8.0 .. versionchanged:: 0.11.0 """ error_template = 'Found complex default value' code = 404 previous_codes = {459} @final class LoopVariableDefinitionViolation(ASTViolation): """ Forbids to use anything rather than ``ast.Name`` to define loop variables. Reasoning: When defining a ``for`` loop with attributes, indexes, calls, or any other nodes it does dirty things inside. Solution: Use regular ``ast.Name`` variables. Or tuple of ``ast.Name`` variables. Star names are also fine. Example:: # Correct: for person in database.people(): ... # Wrong: for context['person'] in database.people(): ... .. versionadded:: 0.8.0 .. versionchanged:: 0.11.0 """ error_template = 'Found wrong `for` loop variable definition' code = 405 previous_codes = {460} @final class ContextManagerVariableDefinitionViolation(ASTViolation): """ Forbids to use anything rather than ``ast.Name`` to define contexts. Reasoning: When defining a ``with`` context managers with attributes, indexes, calls, or any other nodes it does dirty things inside. Solution: Use regular ``ast.Name`` variables. Or tuple of ``ast.Name`` variables. Star names are also fine. Example:: # Correct: with open('README.md') as readme: ... # Wrong: with open('README.md') as files['readme']: ... .. versionadded:: 0.8.0 .. versionchanged:: 0.11.0 """ error_template = 'Found wrong context manager variable definition' code = 406 previous_codes = {461} @final class MutableModuleConstantViolation(ASTViolation): """ Forbids mutable constants on a module level. Reasoning: Constants should be immutable. Solution: Use immutable types for constants. We only treat ``ast.Set``, ``ast.Dict``, ``ast.List``, and comprehensions as mutable things. All other nodes are still fine. Example:: # Correct: import types CONST1 = frozenset((1, 2, 3)) CONST2 = (1, 2, 3) CONST3 = types.MappingProxyType({'key': 'value'}) # Wrong: CONST1 = {1, 2, 3} CONST2 = [x for x in some()] CONST3 = {'key': 'value'} .. versionadded:: 0.10.0 .. versionchanged:: 0.11.0 """ error_template = 'Found mutable module constant' code = 407 previous_codes = {466} @final class SameElementsInConditionViolation(ASTViolation): """ Forbids to use the same logical conditions in one expression. Reasoning: Using the same name in logical condition more that once indicates that you are either making a logical mistake, or just over-complicating your design. Solution: Remove the duplicating condition. Example:: # Correct: if some_value or other_value: ... # Wrong: if some_value or some_value: ... .. versionadded:: 0.10.0 .. versionchanged:: 0.11.0 """ error_template = 'Found duplicate logical condition' code = 408 previous_codes = {469} @final class HeterogenousCompareViolation(ASTViolation): """ Forbids to heterogenous operators in one compare. Note, that we allow to mix ``>`` with ``>=`` and ``<`` with ``<=`` operators. Reasoning: This is hard to read and understand. 
Solution: Refactor the expression to have separate parts joined with ``and`` boolean operator. Example:: # Correct: if x == y == z: ... if x > y >= z: ... # Wrong: if x > y == 5: ... if x == y != z: ... .. versionadded:: 0.10.0 .. versionchanged:: 0.11.0 """ error_template = 'Found heterogenous compare' code = 409 previous_codes = {471} @final class WrongModuleMetadataViolation(ASTViolation): """ Forbids to have some module level variables. Reasoning: We discourage using module variables like ``__author__``, because code should not contain any metadata. Solution: Place all the metadata in ``setup.py``, ``setup.cfg``, or ``pyproject.toml``. Use proper docstrings and packaging classifiers. Use ``pkg_resources`` if you need to import this data into your app. See :py:data:`~wemake_python_styleguide.constants.MODULE_METADATA_VARIABLES_BLACKLIST` for full list of bad names. Example:: # Wrong: __author__ = 'Nikita Sobolev' __version__ = 0.1.2 .. versionadded:: 0.1.0 """ error_template = 'Found wrong metadata variable: {0}' code = 410 @final class EmptyModuleViolation(SimpleViolation): """ Forbids to have empty modules. Reasoning: Why is it even there? Do not pollute your project with empty files. Solution: If you have an empty module there are two ways to handle that: 1. delete it 2. drop some documentation in it, so you will explain why it is there .. versionadded:: 0.1.0 """ error_template = 'Found empty module' code = 411 @final class InitModuleHasLogicViolation(SimpleViolation): """ Forbids to have logic inside ``__init__`` module. Reasoning: If you have logic inside the ``__init__`` module it means several things: 1. you are keeping some outdated stuff there, you need to refactor 2. you are placing this logic into the wrong file, just create another one 3. you are doing some dark magic, and you should not do that Solution: Put your code in other modules. However, we allow to have some contents inside the ``__init__`` module: 1. comments, since they are dropped before AST comes in play 2. docs string, because sometimes it is required to state something It is also fine when you have different users that use your code. And you do not want to break everything for them. In this case this rule can be configured. Configuration: This rule is configurable with ``--i-control-code``. Default: :str:`wemake_python_styleguide.options.defaults.I_CONTROL_CODE` .. versionadded:: 0.1.0 """ error_template = 'Found `__init__.py` module with logic' code = 412 @final class BadMagicModuleFunctionViolation(ASTViolation): """ Forbids to use ``__getaddr__`` and ``__dir__`` module magic methods. Reasoning: It does not bring any features, only making it harder to understand what is going on. Solution: Refactor your code to use custom methods instead. Configuration: This rule is configurable with ``--i-control-code``. Default: :str:`wemake_python_styleguide.options.defaults.I_CONTROL_CODE` .. versionadded:: 0.9.0 """ error_template = 'Found bad magic module function: {0}' code = 413 @final class WrongUnpackingViolation(ASTViolation): """ Forbids to have tuple unpacking with side-effects. Reasoning: Having unpacking with side-effects is very dirty. You might get in serious and very hard-to-debug troubles because of this technique. So, do not use it. Solution: Use unpacking with only variables, not any other entities. Example:: # Correct: first, second = some() # Wrong: first, some_dict['alias'] = some() .. versionadded:: 0.6.0 .. 
versionchanged:: 0.11.0 """ error_template = 'Found incorrect unpacking target' code = 414 previous_codes = {446} @final class DuplicateExceptionViolation(ASTViolation): """ Forbids to have the same exception class in multiple ``except`` blocks. Reasoning: Having the same exception name in different blocks means that something is not right: since only one branch will work. Other one will always be ignored. So, that is clearly an error. Solution: Use unique exception handling rules. Example:: # Correct: try: ... except ValueError: ... # Wrong: try: ... except ValueError: ... except ValueError: ... .. versionadded:: 0.6.0 .. versionchanged:: 0.11.0 """ error_template = 'Found duplicate exception: {0}' code = 415 previous_codes = {447} @final class YieldInComprehensionViolation(ASTViolation): """ Forbids to have ``yield`` keyword inside comprehensions. Reasoning: Having the ``yield`` keyword inside comprehensions is error-prone. You can shoot yourself in a foot by an inaccurate usage of this feature. Solution: Use regular ``for`` loops with ``yield`` keywords. Or create a separate generator function. Example:: # Wrong: list((yield letter) for letter in 'ab') # Will resilt in: ['a', None, 'b', None] list([(yield letter) for letter in 'ab']) # Will result in: ['a', 'b'] See also: https://github.com/satwikkansal/wtfPython#-yielding-none .. versionadded:: 0.7.0 .. versionchanged:: 0.11.0 """ error_template = 'Found `yield` inside comprehension' code = 416 previous_codes = {448} @final class NonUniqueItemsInHashViolation(ASTViolation): """ Forbids to have duplicate items in hashes. Reasoning: When you explicitly put duplicate items in ``set`` literals or in ``dict`` keys it just does not make any sense. Since hashes cannot contain duplicate items and they will be removed anyway. Solution: Remove duplicate items. Example:: # Correct: some_set = {'a', variable1} some_set = {make_call(), make_call()} # Wrong: some_set = {'a', 'a', variable1, variable1} Things that we consider duplicates: builtins and variables. These nodes are not checked because they may return different results: - function and method calls - comprehensions - attributes - subscribe operations .. versionadded:: 0.7.0 .. versionchanged:: 0.11.0 .. versionchanged:: 0.12.0 """ error_template = 'Found non-unique item in hash: {0}' code = 417 previous_codes = {449} @final class BaseExceptionSubclassViolation(ASTViolation): """ Forbids to have duplicate items in ``set`` literals. Reasoning: ``BaseException`` is a special case: it is not designed to be extended by users. A lot of your ``except Exception`` cases won't work. That's incorrect and dangerous. Solution: Change the base class to ``Exception``. Example:: # Correct: class MyException(Exception): ... # Wrong: class MyException(BaseException): ... See also: https://docs.python.org/3/library/exceptions.html#exception-hierarchy .. versionadded:: 0.7.0 .. versionchanged:: 0.11.0 """ error_template = 'Found exception inherited from `BaseException`' code = 418 previous_codes = {450} @final class TryExceptMultipleReturnPathViolation(ASTViolation): """ Forbids to use multiple returning paths with ``try`` / ``except`` case. Note, that we check for any ``return``, ``break``, or ``raise`` nodes. Reasoning: The problem with ``return`` in ``else`` and ``finally`` is that it is impossible to say what value is going to be actually returned without looking up the implementation details. Why? Because ``return`` does not expect that some other code will be executed after it. 
But, ``finally`` is always executed, even after ``return``. And ``else`` will not be executed when there are no exceptions in ``try`` case and a ``return`` statement. Solution: Remove ``return`` from one of the cases. Example:: # Correct: try: return 1 except YourException: ... finally: clear_things_up() # Wrong: try: return 1 # this line will never return except Exception: ... finally: return 2 # this line will actually return try: return 1 # this line will actually return except ZeroDivisionError: ... else: return 0 # this line will never return .. versionadded:: 0.7.0 .. versionchanged:: 0.11.0 .. versionchanged:: 0.12.0 """ error_template = 'Found `try`/`else`/`finally` with multiple return paths' code = 419 previous_codes = {458} @final class WrongKeywordViolation(ASTViolation): """ Forbids to use some ``python`` keywords. Reasoning: Using some keywords generally gives you more pain that relieve. ``del`` keyword is not composable with other functions, you cannot pass it as a regular function. It is also quite error-prone due to ``__del__`` magic method complexity and that ``del`` is actually used to nullify variables and delete them from the execution scope. Moreover, it has a lot of substitutions. You won't miss it! ``pass`` keyword is just useless by design. There's no usecase for it. Because it does literally nothing. ``global`` and ``nonlocal`` promote bad-practices of having an external mutable state somewhere. This solution does not scale. And leads to multiple possible mistakes in the future. Solution: Solutions differ from keyword to keyword. ``pass`` should be replaced with docstring or ``contextlib.suppress``. ``del`` should be replaced with specialized methods like ``.pop()``. ``global`` and ``nonlocal`` usages should be refactored. .. versionadded:: 0.1.0 """ error_template = 'Found wrong keyword: {0}' code = 420 @final class WrongFunctionCallViolation(ASTViolation): """ Forbids to call some built-in functions. Reasoning: Some functions are only suitable for very specific use cases, we forbid to use them in a free manner. See :py:data:`~wemake_python_styleguide.constants.FUNCTIONS_BLACKLIST` for the full list of blacklisted functions. See also: https://www.youtube.com/watch?v=YjHsOrOOSuI .. versionadded:: 0.1.0 """ error_template = 'Found wrong function call: {0}' code = 421 @final class FutureImportViolation(ASTViolation): """ Forbids to use ``__future__`` imports. Reasoning: Almost all ``__future__`` imports are legacy ``python2`` compatibility tools that are no longer required. Solution: Remove them. Drop ``python2`` support. Except, there are some new ones for ``python4`` support. See :py:data:`~wemake_python_styleguide.constants.FUTURE_IMPORTS_WHITELIST` for the full list of allowed future imports. Example:: # Correct: from __future__ import annotations # Wrong: from __future__ import print_function .. versionadded:: 0.1.0 """ error_template = 'Found future import: {0}' code = 422 @final class RaiseNotImplementedViolation(ASTViolation): """ Forbids to use ``NotImplemented`` error. Reasoning: These two violations look so similar. But, these violations have different use cases. Use cases of ``NotImplemented`` is too limited to be generally available. Solution: Use ``NotImplementedError``. Example:: # Correct: raise NotImplementedError('To be done') # Wrong: raise NotImplemented .. 
versionadded:: 0.1.0 See also: https://stackoverflow.com/a/44575926/4842742 """ error_template = 'Found raise NotImplemented' code = 423 @final class BaseExceptionViolation(ASTViolation): """ Forbids to use ``BaseException`` exception. Reasoning: We can silence system exit and keyboard interrupt with this exception handler. It is almost the same as a raw ``except:`` block. Solution: Handle ``Exception``, ``KeyboardInterrupt``, ``GeneratorExit``, and ``SystemExit`` separately. Do not use the plain ``except:`` keyword. Example:: # Correct: except Exception as ex: ... # Wrong: except BaseException as ex: ... .. versionadded:: 0.3.0 See also: https://docs.python.org/3/library/exceptions.html#exception-hierarchy https://help.semmle.com/wiki/pages/viewpage.action?pageId=1608527 """ error_template = 'Found except `BaseException`' code = 424 @final class BooleanPositionalArgumentViolation(ASTViolation): """ Forbids to pass booleans as non-keyword parameters. Reasoning: Passing booleans as regular positional parameters is very non-descriptive. It is almost impossible to tell what this parameter means, and you almost always have to look up the implementation to tell what is going on. Solution: Pass booleans as keywords only. This will help you save extra context on what's going on. Example:: # Correct: UsersRepository.update(cache=True) # Wrong: UsersRepository.update(True) .. versionadded:: 0.6.0 """ error_template = 'Found boolean non-keyword argument: {0}' code = 425 @final class LambdaInsideLoopViolation(ASTViolation): """ Forbids to use ``lambda`` inside loops. Reasoning: It is error-prone to use ``lambda`` inside ``for`` and ``while`` loops due to the famous late-binding of closures. Solution: Use regular functions, factory functions, or ``partial`` functions. Save yourself from possible confusion. Example:: # Correct: for index in range(10): some.append(partial_function(index)) # Wrong: for index in range(10): some.append(lambda index=index: index * 10) other.append(lambda: index * 10) .. versionadded:: 0.5.0 .. versionchanged:: 0.11.0 See also: https://docs.python-guide.org/writing/gotchas/#late-binding-closures """ error_template = "Found `lambda` in loop's body" code = 426 previous_codes = {442} @final class UnreachableCodeViolation(ASTViolation): """ Forbids to have unreachable code. What is unreachable code? It is some lines of code that cannot be executed by python's interpreter. This is probably caused by ``return`` or ``raise`` statements. However, we cannot cover 100% of truly unreachable code by this rule. This happens due to the dynamic nature of python. For example, detecting that ``1 / some_value`` would sometimes raise an exception is too complicated and is out of the scope of this rule. Reasoning: Having dead code in your project is an indicator that you do not care about your code base at all. It dramatically reduces code quality and readability. It also demotivates team members. Solution: Delete any unreachable code you have. Or refactor it, if this happens by your mistake. Example:: # Correct: def some_function(): print('This line is reachable, all good') return 5 # Wrong: def some_function(): return 5 print('This line is unreachable') .. versionadded:: 0.5.0 .. versionchanged:: 0.11.0 """ error_template = 'Found unreachable code' code = 427 previous_codes = {443} @final class StatementHasNoEffectViolation(ASTViolation): """ Forbids to have statements that do nothing.
Reasoning: Statements that just access a value, or expressions used as statements, indicate that your code contains dead lines. They just pollute your codebase and do nothing. Solution: Refactor your code in case it was a typo or error. Or just delete this code. Example:: # Correct: def some_function(): price = 8 + 2 return price # Wrong: def some_function(): 8 + 2 print .. versionadded:: 0.5.0 .. versionchanged:: 0.11.0 """ error_template = 'Found statement that has no effect' code = 428 previous_codes = {444} @final class MultipleAssignmentsViolation(ASTViolation): """ Forbids to have multiple assignments on the same line. Reasoning: Multiple assignments on the same line might not do what you think they do. They can also grow pretty long, and you will not notice the rising complexity of your code. Solution: Use separate lines for each assignment. Example:: # Correct: a = 1 b = 1 # Wrong: a = b = 1 .. versionadded:: 0.6.0 .. versionchanged:: 0.11.0 """ error_template = 'Found multiple assign targets' code = 429 previous_codes = {445} @final class NestedFunctionViolation(ASTViolation): """ Forbids to have nested functions. Reasoning: Nesting functions is a bad practice. It is hard to test them, and it is hard to separate them later. People tend to overuse closures, so it's hard to manage the dataflow. Solution: Just write flat functions, there's no need to nest them. Pass parameters as normal arguments, do not use closures until you need them for decorators or factories. We also disallow to nest ``lambda`` and ``async`` functions. See :py:data:`~wemake_python_styleguide.constants.NESTED_FUNCTIONS_WHITELIST` for the whole list of whitelisted names. Example:: # Correct: def do_some(): ... def other(): ... # Wrong: def do_some(): def inner(): ... .. versionadded:: 0.1.0 """ error_template = 'Found nested function: {0}' code = 430 @final class NestedClassViolation(ASTViolation): """ Forbids to use nested classes. Reasoning: Nested classes are really hard to manage. You cannot even create an instance of such a class in many cases. Testing them is also really hard. Solution: Just write flat classes, there's no need to nest them. If you are nesting classes inside a function for parametrization, then you will probably need to use a different design (or metaclasses). Configuration: This rule is configurable with ``--nested-classes-whitelist``. Default: :str:`wemake_python_styleguide.options.defaults.NESTED_CLASSES_WHITELIST` Example:: # Correct: class Some(object): ... class Other(object): ... # Wrong: class Some(object): class Inner(object): ... .. versionadded:: 0.1.0 .. versionchanged:: 0.13.0 """ error_template = 'Found nested class: {0}' code = 431 @final class MagicNumberViolation(ASTViolation): """ Forbids to use magic numbers in your code. What do we call a "magic number"? Well, it is actually any number that appears in your code out of nowhere. Like ``42``. Or ``0.32``. Reasoning: It is very hard to remember what these numbers actually mean. Why were they used? Should they ever be changed? Or are they eternal like ``3.14``? Solution: Give these numbers a name! Move them to a separate variable, giving more context to the reader. And by moving things into new variables you will trigger other complexity checks. Example:: # Correct: price_in_euro = 3.33 # could be changed later total = get_items_from_cart() * price_in_euro # Wrong: total = get_items_from_cart() * 3.33 What numbers do we exclude from this check?
Any numbers that are assigned to a variable, array, dictionary, or keyword arguments inside a function. ``int`` numbers that are in range ``[-10, 10]`` and some other common numbers, that are defined in :py:data:`~wemake_python_styleguide.constants.MAGIC_NUMBERS_WHITELIST` .. versionadded:: 0.1.0 See also: https://en.wikipedia.org/wiki/Magic_number_(programming) """ code = 432 error_template = 'Found magic number: {0}' @final class NestedImportViolation(ASTViolation): """ Forbids to have nested imports in functions. Reasoning: Usually, nested imports are used to fix an import cycle. So, nested imports show that there's an issue with your design. Solution: You don't need nested imports, you need to refactor your code. Introduce a new module or find another way to do what you want to do. Rethink how your layered architecture should look. Example:: # Correct: from my_module import some_function def some(): ... # Wrong: def some(): from my_module import some_function .. versionadded:: 0.1.0 .. versionchanged:: 0.11.0 See also: https://github.com/seddonym/layer_linter """ error_template = 'Found nested import' code = 433 previous_codes = {435} @final class ReassigningVariableToItselfViolation(ASTViolation): """ Forbids to assign a variable to itself. Reasoning: There is no need to do that. Generally, it is an indication of some errors or just dead code. Example:: # Correct: some = some + 1 x_coord, y_coord = y_coord, x_coord # Wrong: some = some x_coord, y_coord = x_coord, y_coord .. versionadded:: 0.3.0 .. versionchanged:: 0.11.0 """ error_template = 'Found reassigning variable to itself: {0}' code = 434 previous_codes = {438} @final class ListMultiplyViolation(ASTViolation): """ Forbids to multiply lists. Reasoning: When you multiply lists, it does not create new values; it creates references to the existing value. That is not what people mean in 99.9% of cases. Solution: Use a list comprehension or a loop instead. Example:: # Wrong: my_list = [1, 2, 3] * 3 See also: https://github.com/satwikkansal/wtfPython#-explanation-8 .. versionadded:: 0.12.0 """ error_template = 'Found list multiply' code = 435 @final class ProtectedModuleViolation(ASTViolation): """ Forbids to import protected modules. Reasoning: When importing protected modules we break a contract that the authors of this module enforce. This way we are not respecting encapsulation and it may break our code at any moment. Solution: Do not import anything from protected modules. Respect the encapsulation. Example:: # Correct: from some.public.module import FooClass # Wrong: import _compat from some._protected.module import BarClass from some.module import _protected .. versionadded:: 0.3.0 .. versionchanged:: 0.11.0 """ error_template = 'Found protected module import' code = 436 previous_codes = {440} @final class ProtectedAttributeViolation(ASTViolation): """ Forbids to use protected attributes and methods. Reasoning: When using protected attributes and methods we break a contract that the authors of this class enforce. This way we are not respecting encapsulation and it may break our code at any moment. Solution: Do not use protected attributes and methods. Respect the encapsulation. Example:: # Correct: self._protected = 1 cls._hidden_method() some.public() super()._protected() # Wrong: print(some._protected) instance._hidden() self.container._internal = 10 Note, that it is possible to use protected attributes with ``self``, ``cls``, and ``super()`` as base names.
We allow this so you can create and use protected attributes and methods inside the class context. This is how protected attributes should be used. .. versionadded:: 0.3.0 .. versionchanged:: 0.11.0 """ error_template = 'Found protected attribute usage: {0}' code = 437 previous_codes = {441} @final class StopIterationInsideGeneratorViolation(ASTViolation): """ Forbids to raise ``StopIteration`` inside generators. Reasoning: ``StopIteration`` should not be raised explicitly in generators. Solution: Use a ``return`` statement to get out of a generator. Example:: # Correct: def some_generator(): if some_value: return yield 1 # Wrong: def some_generator(): if some_value: raise StopIteration yield 1 See also: https://docs.python.org/3/library/exceptions.html#StopIteration .. versionadded:: 0.12.0 """ error_template = 'Found `StopIteration` raising inside generator' code = 438 @final class WrongUnicodeEscapeViolation(TokenizeViolation): r""" Forbids to use unicode escape sequences in binary strings. Reasoning: Binary strings do not work with unicode. Having unicode escape characters in there means that you have an error in your code. Solution: Use regular strings when escaping unicode strings. Example:: # Correct: escaped = '\u0041' # equals to 'A' # Wrong: escaped = b'\u0040' # equals to b'\\u0040' .. versionadded:: 0.12.0 """ error_template = 'Found unicode escape in a binary string: {0}' code = 439 @final class BlockAndLocalOverlapViolation(ASTViolation): """ Forbids local and block variables to overlap. What we call local variables: 1. Assigns and annotations 2. Function arguments (they are local to the function body) What we call block variables: 1. Imports 2. Functions and async functions definitions 3. Classes, methods, and async methods definitions 4. For and async for loops variables 5. Except block exception aliases We allow local variables to overlap each other, but we forbid block variables to overlap each other. Reasoning: A lot of complex errors might happen when you shadow local variables with block variables or when you shadow block variables with local variables. Solution: Use names that do not overlap. Example:: # Correct: my_value = 1 my_value = my_value + 1 # Wrong: import my_value my_value = 1 # overlaps with import See also: https://github.com/satwikkansal/wtfPython#-explanation-20 .. versionadded:: 0.12.0 """ error_template = 'Found block variables overlap: {0}' code = 440 @final class ControlVarUsedAfterBlockViolation(ASTViolation): """ Forbids to use control variables after the block body. What we call block control variables: 1. ``for`` loop unpacked variables 2. ``with`` context variables 3. ``except`` exception names Reasoning: Variables leaking from the blocks can damage your logic. They might not contain what you think they contain. Some variables might even be deleted right after the block, just like in ``except Exception as exc:`` where ``exc`` won't be in scope after the ``except`` body. Solution: Use names inside the scope where they are defined. Create new functions to return values in case you need to use block variables: when searching for a value, etc. Example:: # Correct: for my_item in collection: print(my_item) # Wrong: for my_item in collection: ... print(my_item) See also: https://github.com/satwikkansal/wtfPython#-explanation-32 .. versionadded:: 0.12.0 """ error_template = 'Found control variable used after block: {0}' code = 441 @final class OuterScopeShadowingViolation(ASTViolation): """ Forbids to shadow variables from outer scopes.
We check function, method, and module scopes, while we do not check class scope, because class-level constants are not available via a regular name: they are scoped as ``ClassName.var_name``. Reasoning: Shadowing can lead you to a big pile of strange and unexpected bugs. Solution: Use different names and do not shadow outer scopes. Example:: # Correct: def test(): ... def other(): test1 = 1 # Wrong: def test(): ... def other(): test = 1 # shadows `test()` function .. versionadded:: 0.12.0 """ error_template = 'Found outer scope names shadowing: {0}' code = 442 @final class UnhashableTypeInHashViolation(ASTViolation): """ Forbids to use explicit unhashable types as set items and dict keys. Reasoning: This will result in a ``TypeError`` at runtime. Solution: Use hashable types to define set items and dict keys. Example:: # Correct: my_dict = {1: {}, (1, 2): [], (2, 3): {1, 2}} # Wrong: my_dict = {[1, 2]: [], {2, 3}: {1, 2}} .. versionadded:: 0.12.0 """ error_template = 'Found unhashable item' code = 443 @final class WrongKeywordConditionViolation(ASTViolation): """ Forbids to use explicit falsely-evaluated conditions with several keywords. We check: - ``ast.While`` - ``ast.Assert`` We only check constants. We do not check variables, attributes, calls, etc. Reasoning: Some conditions clearly tell us that this node won't work correctly, so we need to check that we can fix that. Solution: Remove the unreachable node, or change the condition item. Example:: # Correct: assert some_variable while True: ... # Wrong: assert [] while False: ... .. versionadded:: 0.12.0 """ error_template = 'Found wrong keyword condition: {0}' code = 444
[]
[]
[]
[]
[]
python
0
0
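The violation docstrings above all describe AST-level checks. As an illustration only (this is not the styleguide's actual visitor code, just a minimal sketch built on the standard-library ``ast`` module), a duplicate ``except`` handler like the one code 415 forbids could be detected as follows:

import ast

def find_duplicate_handlers(source: str):
    # Return (lineno, name) pairs for exception names repeated within one try block;
    # only plain names such as `except ValueError:` are considered in this sketch.
    duplicates = []
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Try):
            seen = set()
            for handler in node.handlers:
                if isinstance(handler.type, ast.Name):
                    if handler.type.id in seen:
                        duplicates.append((handler.lineno, handler.type.id))
                    seen.add(handler.type.id)
    return duplicates

print(find_duplicate_handlers(
    'try:\n    ...\nexcept ValueError:\n    ...\nexcept ValueError:\n    ...\n'
))  # [(5, 'ValueError')]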
pictures/asgi.py
""" ASGI config for pictures project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pictures.settings') application = get_asgi_application()
[]
[]
[]
[]
[]
python
0
0
examples/pwr_run/checkpointing/throughput/final2_inverse/job25.py
""" #Trains a ResNet on the CIFAR10 dataset. """ from __future__ import print_function import keras from keras.layers import Dense, Conv2D, BatchNormalization, Activation from keras.layers import AveragePooling2D, Input, Flatten from keras.optimizers import Adam from keras.callbacks import ModelCheckpoint, LearningRateScheduler from keras.callbacks import ReduceLROnPlateau, TensorBoard from keras.preprocessing.image import ImageDataGenerator from keras.regularizers import l2 from keras import backend as K from keras.models import Model from keras.datasets import cifar10 from keras.applications.mobilenet_v2 import MobileNetV2 from keras import models, layers, optimizers from datetime import datetime import tensorflow as tf import numpy as np import os import pdb import sys import argparse import time import signal import glob import json import send_signal parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training') parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name') parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint') parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use') parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)') parser.set_defaults(resume=False) args = parser.parse_args() os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num # Training parameters batch_size = 256 args_lr = 0.0007 epoch_begin_time = 0 job_name = sys.argv[0].split('.')[0] save_files = '/scratch/li.baol/checkpoint_final2_inverse/' + job_name + '*' total_epochs = 90 starting_epoch = 0 # first step is to update the PID pid = os.getpid() message = job_name + ' pid ' + str(pid) # 'job50 pid 3333' send_signal.send(args.node, 10002, message) if args.resume: save_file = glob.glob(save_files)[0] # epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0]) starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1]) data_augmentation = True num_classes = 10 # Subtracting pixel mean improves accuracy subtract_pixel_mean = True n = 3 # Model name, depth and version model_type = args.tc #'P100_resnet50_he_256_1' # Load the CIFAR10 data. (x_train, y_train), (x_test, y_test) = cifar10.load_data() # Normalize data. x_train = x_train.astype('float32') / 255 x_test = x_test.astype('float32') / 255 # If subtract pixel mean is enabled if subtract_pixel_mean: x_train_mean = np.mean(x_train, axis=0) x_train -= x_train_mean x_test -= x_train_mean print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') print('y_train shape:', y_train.shape) # Convert class vectors to binary class matrices. 
y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) if args.resume: print('resume from checkpoint') message = job_name + ' b_end' send_signal.send(args.node, 10002, message) model = keras.models.load_model(save_file) message = job_name + ' c_end' send_signal.send(args.node, 10002, message) else: print('train from start') model = models.Sequential() base_model = MobileNetV2(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None) #base_model.summary() #pdb.set_trace() model.add(base_model) model.add(layers.Flatten()) #model.add(layers.BatchNormalization()) #model.add(layers.Dense(128, activation='relu')) #model.add(layers.Dropout(0.5)) #model.add(layers.BatchNormalization()) #model.add(layers.Dense(64, activation='relu')) #model.add(layers.Dropout(0.5)) #model.add(layers.BatchNormalization()) model.add(layers.Dense(10, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=args_lr), metrics=['accuracy']) #model.summary() print(model_type) #pdb.set_trace() current_epoch = 0 ################### connects interrupt signal to the process ##################### def terminateProcess(signalNumber, frame): # first record the wasted epoch time global epoch_begin_time if epoch_begin_time == 0: epoch_waste_time = 0 else: epoch_waste_time = int(time.time() - epoch_begin_time) message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100' if epoch_waste_time > 0: send_signal.send(args.node, 10002, message) print('checkpointing the model triggered by kill -15 signal') # delete whatever checkpoint that already exists for f in glob.glob(save_files): os.remove(f) model.save('/scratch/li.baol/checkpoint_final2_inverse/' + job_name + '_' + str(current_epoch) + '.h5') print ('(SIGTERM) terminating the process') message = job_name + ' checkpoint' send_signal.send(args.node, 10002, message) sys.exit() signal.signal(signal.SIGTERM, terminateProcess) ################################################################################# logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch') first_epoch_start = 0 class PrintEpoch(keras.callbacks.Callback): def on_epoch_begin(self, epoch, logs=None): global current_epoch, first_epoch_start #remaining_epochs = epochs - epoch current_epoch = epoch print('current epoch ' + str(current_epoch)) global epoch_begin_time epoch_begin_time = time.time() if epoch == starting_epoch and args.resume: first_epoch_start = time.time() message = job_name + ' d_end' send_signal.send(args.node, 10002, message) elif epoch == starting_epoch: first_epoch_start = time.time() if epoch == starting_epoch: # send signal to indicate checkpoint is qualified message = job_name + ' ckpt_qual' send_signal.send(args.node, 10002, message) def on_epoch_end(self, epoch, logs=None): if epoch == starting_epoch: first_epoch_time = int(time.time() - first_epoch_start) message = job_name + ' 1st_epoch ' + str(first_epoch_time) send_signal.send(args.node, 10002, message) progress = round((epoch+1) / round(total_epochs/2), 2) message = job_name + ' completion ' + str(progress) send_signal.send(args.node, 10002, message) my_callback = PrintEpoch() callbacks = [tensorboard_callback, my_callback] #[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback] # Run training model.fit(x_train, y_train, batch_size=batch_size, epochs=round(total_epochs/2), validation_data=(x_test, y_test), shuffle=True, 
callbacks=callbacks, initial_epoch=starting_epoch, verbose=1 ) # Score trained model. scores = model.evaluate(x_test, y_test, verbose=1) print('Test loss:', scores[0]) print('Test accuracy:', scores[1]) # send signal to indicate job has finished message = job_name + ' finish' send_signal.send(args.node, 10002, message)
[]
[]
[ "CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
python
2
0
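The training job above checkpoints on SIGTERM and resumes from the newest checkpoint at startup. A stripped-down sketch of that pattern (hypothetical paths and a toy model, assuming the same Keras install as the script; not the exact job code) could look like this:

import glob
import os
import signal
import sys

import keras

CKPT_GLOB = '/tmp/ckpt_demo_*.h5'  # hypothetical checkpoint location
current_epoch = 0

existing = glob.glob(CKPT_GLOB)
if existing:
    # Resume: the epoch number is encoded in the file name, as in the job script.
    current_epoch = int(existing[0].rsplit('_', 1)[-1].split('.')[0])
    model = keras.models.load_model(existing[0])
else:
    model = keras.models.Sequential([keras.layers.Dense(10, activation='softmax', input_shape=(784,))])
    model.compile(optimizer='adam', loss='categorical_crossentropy')

def checkpoint_and_exit(signum, frame):
    # Replace any stale checkpoint, save the current state, and stop cleanly.
    for old in glob.glob(CKPT_GLOB):
        os.remove(old)
    model.save('/tmp/ckpt_demo_{}.h5'.format(current_epoch))
    sys.exit(0)

signal.signal(signal.SIGTERM, checkpoint_and_exit)
# model.fit(..., initial_epoch=current_epoch) would run here, updating current_epoch each epoch.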
multiagent/rendering.py
""" 2D rendering framework """ from __future__ import division import os import six import sys if "Apple" in sys.version: if 'DYLD_FALLBACK_LIBRARY_PATH' in os.environ: os.environ['DYLD_FALLBACK_LIBRARY_PATH'] += ':/usr/lib' # (JDS 2016/04/15): avoid bug on Anaconda 2.3.0 / Yosemite from gym import error try: import pyglet except ImportError as e: raise ImportError("HINT: you can install pyglet directly via 'pip install pyglet'. But if you really just want to install all Gym dependencies and not have to think about it, 'pip install -e .[all]' or 'pip install gym[all]' will do it.") try: from pyglet.gl import * except ImportError as e: raise ImportError("Error occured while running `from pyglet.gl import *`",suffix="HINT: make sure you have OpenGL install. On Ubuntu, you can run 'apt-get install python-opengl'. If you're running on a server, you may need a virtual frame buffer; something like this should work: 'xvfb-run -s \"-screen 0 1400x900x24\" python <your_script.py>'") import math import numpy as np RAD2DEG = 57.29577951308232 def get_display(spec): """Convert a display specification (such as :0) into an actual Display object. Pyglet only supports multiple Displays on Linux. """ if spec is None: return pyglet.canvas.get_display() # returns already available pyglet_display, # if there is no pyglet display available then it creates one elif isinstance(spec, str): return pyglet.canvas.Display(spec) else: raise error.Error('Invalid display specification: {}. (Must be a string like :0 or None.)'.format(spec)) class Viewer(object): def __init__(self, width, height, display=None): display = get_display(display) self.width = width self.height = height self.window = pyglet.window.Window(width=width, height=height, display=display) self.window.on_close = self.window_closed_by_user self.geoms = [] self.onetime_geoms = [] self.transform = Transform() glEnable(GL_BLEND) # glEnable(GL_MULTISAMPLE) glEnable(GL_LINE_SMOOTH) # glHint(GL_LINE_SMOOTH_HINT, GL_DONT_CARE) glHint(GL_LINE_SMOOTH_HINT, GL_NICEST) glLineWidth(2.0) glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) def close(self): self.window.close() def window_closed_by_user(self): self.close() def set_bounds(self, left, right, bottom, top): assert right > left and top > bottom scalex = self.width/(right-left) scaley = self.height/(top-bottom) self.transform = Transform( translation=(-left*scalex, -bottom*scaley), scale=(scalex, scaley)) def add_geom(self, geom): self.geoms.append(geom) def add_onetime(self, geom): self.onetime_geoms.append(geom) def render(self, return_rgb_array=False): glClearColor(1,1,1,1) self.window.clear() self.window.switch_to() self.window.dispatch_events() self.transform.enable() for geom in self.geoms: geom.render() for geom in self.onetime_geoms: geom.render() self.transform.disable() arr = None if return_rgb_array: buffer = pyglet.image.get_buffer_manager().get_color_buffer() image_data = buffer.get_image_data() arr = np.fromstring(image_data.get_data(), dtype=np.uint8, sep='') # In https://github.com/openai/gym-http-api/issues/2, we # discovered that someone using Xmonad on Arch was having # a window of size 598 x 398, though a 600 x 400 window # was requested. (Guess Xmonad was preserving a pixel for # the boundary.) So we use the buffer height/width rather # than the requested one. 
arr = arr.reshape(buffer.height, buffer.width, 4) arr = arr[::-1,:,0:3] self.window.flip() self.onetime_geoms = [] return arr # Convenience def draw_circle(self, radius=10, res=30, filled=True, **attrs): geom = make_circle(radius=radius, res=res, filled=filled) _add_attrs(geom, attrs) self.add_onetime(geom) return geom def draw_polygon(self, v, filled=True, **attrs): geom = make_polygon(v=v, filled=filled) _add_attrs(geom, attrs) self.add_onetime(geom) return geom def draw_polyline(self, v, **attrs): geom = make_polyline(v=v) _add_attrs(geom, attrs) self.add_onetime(geom) return geom def draw_line(self, start, end, **attrs): geom = Line(start, end) _add_attrs(geom, attrs) self.add_onetime(geom) return geom def get_array(self): self.window.flip() image_data = pyglet.image.get_buffer_manager().get_color_buffer().get_image_data() self.window.flip() arr = np.fromstring(image_data.data, dtype=np.uint8, sep='') arr = arr.reshape(self.height, self.width, 4) return arr[::-1,:,0:3] def _add_attrs(geom, attrs): if "color" in attrs: geom.set_color(*attrs["color"]) if "linewidth" in attrs: geom.set_linewidth(attrs["linewidth"]) class Geom(object): def __init__(self): self._color=Color((0, 0, 0, 1.0)) self.attrs = [self._color] def render(self): for attr in reversed(self.attrs): attr.enable() self.render1() for attr in self.attrs: attr.disable() def render1(self): raise NotImplementedError def add_attr(self, attr): self.attrs.append(attr) def set_color(self, r, g, b, alpha=1): self._color.vec4 = (r, g, b, alpha) class Attr(object): def enable(self): raise NotImplementedError def disable(self): pass class Transform(Attr): def __init__(self, translation=(0.0, 0.0), rotation=0.0, scale=(1,1)): self.set_translation(*translation) self.set_rotation(rotation) self.set_scale(*scale) def enable(self): glPushMatrix() glTranslatef(self.translation[0], self.translation[1], 0) # translate to GL loc ppint glRotatef(RAD2DEG * self.rotation, 0, 0, 1.0) glScalef(self.scale[0], self.scale[1], 1) def disable(self): glPopMatrix() def set_translation(self, newx, newy): self.translation = (float(newx), float(newy)) def set_rotation(self, new): self.rotation = float(new) def set_scale(self, newx, newy): self.scale = (float(newx), float(newy)) class Color(Attr): def __init__(self, vec4): self.vec4 = vec4 def enable(self): glColor4f(*self.vec4) class LineStyle(Attr): def __init__(self, style): self.style = style def enable(self): glEnable(GL_LINE_STIPPLE) glLineStipple(1, self.style) def disable(self): glDisable(GL_LINE_STIPPLE) class LineWidth(Attr): def __init__(self, stroke): self.stroke = stroke def enable(self): glLineWidth(self.stroke) class Point(Geom): def __init__(self): Geom.__init__(self) def render1(self): glBegin(GL_POINTS) # draw point glVertex3f(0.0, 0.0, 0.0) glEnd() class FilledPolygon(Geom): def __init__(self, v): Geom.__init__(self) self.v = v def render1(self): if len(self.v) == 4 : glBegin(GL_QUADS) elif len(self.v) > 4 : glBegin(GL_POLYGON) else: glBegin(GL_TRIANGLES) for p in self.v: glVertex3f(p[0], p[1],0) # draw each vertex glEnd() color = (self._color.vec4[0] * 0.5, self._color.vec4[1] * 0.5, self._color.vec4[2] * 0.5, self._color.vec4[3] * 0.5) glColor4f(*color) glBegin(GL_LINE_LOOP) for p in self.v: glVertex3f(p[0], p[1],0) # draw each vertex glEnd() def make_circle(radius=10, res=30, filled=True): points = [] for i in range(res): ang = 2*math.pi*i / res points.append((math.cos(ang)*radius, math.sin(ang)*radius)) if filled: return FilledPolygon(points) else: return PolyLine(points, True) def 
make_polygon(v, filled=True): if filled: return FilledPolygon(v) else: return PolyLine(v, True) def make_polyline(v): return PolyLine(v, False) def make_capsule(length, width): l, r, t, b = 0, length, width/2, -width/2 box = make_polygon([(l,b), (l,t), (r,t), (r,b)]) circ0 = make_circle(width/2) circ1 = make_circle(width/2) circ1.add_attr(Transform(translation=(length, 0))) geom = Compound([box, circ0, circ1]) return geom class Compound(Geom): def __init__(self, gs): Geom.__init__(self) self.gs = gs for g in self.gs: g.attrs = [a for a in g.attrs if not isinstance(a, Color)] def render1(self): for g in self.gs: g.render() class PolyLine(Geom): def __init__(self, v, close): Geom.__init__(self) self.v = v self.close = close self.linewidth = LineWidth(1) self.add_attr(self.linewidth) def render1(self): glBegin(GL_LINE_LOOP if self.close else GL_LINE_STRIP) for p in self.v: glVertex3f(p[0], p[1],0) # draw each vertex glEnd() def set_linewidth(self, x): self.linewidth.stroke = x class Line(Geom): def __init__(self, start=(0.0, 0.0), end=(0.0, 0.0)): Geom.__init__(self) self.start = start self.end = end self.linewidth = LineWidth(1) self.add_attr(self.linewidth) def render1(self): glBegin(GL_LINES) glVertex2f(*self.start) glVertex2f(*self.end) glEnd() class Image(Geom): def __init__(self, fname, width, height): Geom.__init__(self) self.width = width self.height = height img = pyglet.image.load(fname) self.img = img self.flip = False def render1(self): self.img.blit(-self.width/2, -self.height/2, width=self.width, height=self.height) # ================================================================ class SimpleImageViewer(object): def __init__(self, display=None): self.window = None self.isopen = False self.display = display def imshow(self, arr): if self.window is None: height, width, channels = arr.shape self.window = pyglet.window.Window(width=width, height=height, display=self.display) self.width = width self.height = height self.isopen = True assert arr.shape == (self.height, self.width, 3), "You passed in an image with the wrong number shape" image = pyglet.image.ImageData(self.width, self.height, 'RGB', arr.tobytes(), pitch=self.width * -3) self.window.clear() self.window.switch_to() self.window.dispatch_events() image.blit(0,0) self.window.flip() def close(self): if self.isopen: self.window.close() self.isopen = False def __del__(self): self.close()
[]
[]
[ "DYLD_FALLBACK_LIBRARY_PATH" ]
[]
["DYLD_FALLBACK_LIBRARY_PATH"]
python
1
0
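The rendering module above is normally driven through its Viewer API. A minimal usage sketch (assuming a working pyglet/OpenGL display; the driver itself is hypothetical and not part of the original file):

from multiagent import rendering

viewer = rendering.Viewer(600, 400)
viewer.set_bounds(-1.0, 1.0, -1.0, 1.0)       # map world coordinates onto the window

agent = rendering.make_circle(radius=0.1)
agent.set_color(0.8, 0.1, 0.1)
agent.add_attr(rendering.Transform(translation=(0.25, -0.25)))
viewer.add_geom(agent)

frame = viewer.render(return_rgb_array=True)  # returns an HxWx3 uint8 array
print(frame.shape)
viewer.close()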
pipelines/dwi_noddi/dwi_noddi_cli.py
# coding: utf8 import clinica.engine as ce class DwiNoddiCLI(ce.CmdParser): def __init__(self): super(DwiNoddiCLI, self).__init__() def define_name(self): """Define the sub-command name to run this pipeline. """ self._name = 'dwi-noddi' def define_description(self): """Define a description of this pipeline. """ self._description = 'NODDI-based processing of DWI datasets.' def define_options(self): """Define the sub-command arguments """ from clinica.engine.cmdparser import PIPELINE_CATEGORIES clinica_comp = self._args.add_argument_group(PIPELINE_CATEGORIES['CLINICA_COMPULSORY']) clinica_comp.add_argument("caps_directory", help='Path to the CAPS directory.') clinica_comp.add_argument("list_bvalues", type=str, help='String listing all the shells (i.e. the b-values) in the corrected DWI datasets comma separated (e.g, 0,300,700,2200)') # Optional arguments clinica_opt = self._args.add_argument_group(PIPELINE_CATEGORIES['CLINICA_OPTIONAL']) clinica_opt.add_argument("-wd", "--working_directory", help='Temporary directory to store pipeline intermediate results') clinica_opt.add_argument("-np", "--n_procs", type=int, default=4, help='Number of cores used to run in parallel') clinica_opt.add_argument("-tsv", "--subjects_sessions_tsv", help='TSV file containing a list of subjects with their sessions.') def run_command(self, args): """ """ import os from tempfile import mkdtemp from clinica.utils.stream import cprint from clinica.pipelines.dwi_noddi.dwi_noddi_pipeline import DwiNoddi pipeline = DwiNoddi( caps_directory=self.absolute_path(args.caps_directory), tsv_file=self.absolute_path(args.subjects_sessions_tsv) ) # Check NODDI Matlab toolbox: try: noddi_matlab_toolbox = os.environ.get('NODDI_MATLAB_TOOLBOX', '') if not noddi_matlab_toolbox: raise RuntimeError('NODDI_MATLAB_TOOLBOX variable is not set') except Exception as e: cprint(str(e)) cprint('NODDI Matlab toolbox has been detected') # Check Niftimatlib toolbox. try: nifti_matlib_toolbox = os.environ.get('NIFTI_MATLIB_TOOLBOX', '') if not nifti_matlib_toolbox: raise RuntimeError('NIFTI_MATLIB_TOOLBOX variable is not set') except Exception as e: cprint(str(e)) cprint('Niftimatlib toolbox has been detected') pipeline.parameters = { 'bvalue_str': dict([ ('bvalue_str', args.list_bvalues) ]), 'n_procs': dict([ ('n_procs', args.n_procs or 4)] ), 'noddi_toolbox_dir': dict([ ('noddi_toolbox_dir', noddi_matlab_toolbox)] ), 'nifti_matlib_dir': dict([ ('nifti_matlib_dir', nifti_matlib_toolbox) ]), } if args.working_directory is None: args.working_directory = mkdtemp() pipeline.base_dir = self.absolute_path(args.working_directory) if args.n_procs: pipeline.run(plugin='MultiProc', plugin_args={'n_procs': args.n_procs}) else: pipeline.run()
[]
[]
[ "NIFTI_MATLIB_TOOLBOX", "NODDI_MATLAB_TOOLBOX" ]
[]
["NIFTI_MATLIB_TOOLBOX", "NODDI_MATLAB_TOOLBOX"]
python
2
0
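Note that the toolbox checks in the CLI above print the "has been detected" message even when the lookup fails, because the RuntimeError is caught and only logged. A stricter variant (a sketch, not Clinica's API) would stop instead:

import os

def require_env(var_name):
    # Raise immediately when a required toolbox variable is missing.
    value = os.environ.get(var_name, '')
    if not value:
        raise RuntimeError('%s variable is not set' % var_name)
    return value

noddi_matlab_toolbox = require_env('NODDI_MATLAB_TOOLBOX')
nifti_matlib_toolbox = require_env('NIFTI_MATLIB_TOOLBOX')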
boolish.py
var1 = True print( "var1 = True") print(bool(var1)) try: var2 = "true" print("var2 = \"true\"") print(bool(var2)) except ValueError: print("SomeError") try: var22 = "false" print("var2 = \"false\"") print(bool(var22)) except ValueError: print("SomeError") try: var22 = "" print(f"var2 = {var22}") print(bool(var22)) except ValueError: print("SomeError") number = 10 print("number = 10") print(bool(number)) number = -40 print("number = -40") print(bool(number)) number = 0 print("number = 0") print(bool(number)) number = 0.5 print("number = 0.5") print(bool(number)) print("print(bool(6<3)") print(bool(6<3))
[]
[]
[]
[]
[]
python
null
null
null
lib-src/lv2/suil/waflib/Tools/qt4.py
#! /usr/bin/env python # encoding: utf-8 # WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file try: from xml.sax import make_parser from xml.sax.handler import ContentHandler except ImportError: has_xml=False ContentHandler=object else: has_xml=True import os,sys from waflib.Tools import c_preproc,cxx from waflib import Task,Utils,Options,Errors from waflib.TaskGen import feature,after_method,extension from waflib.Configure import conf from waflib import Logs MOC_H=['.h','.hpp','.hxx','.hh'] EXT_RCC=['.qrc'] EXT_UI=['.ui'] EXT_QT4=['.cpp','.cc','.cxx','.C'] QT4_LIBS="QtCore QtGui QtUiTools QtNetwork QtOpenGL QtSql QtSvg QtTest QtXml QtXmlPatterns QtWebKit Qt3Support QtHelp QtScript QtDeclarative QtDesigner" class qxx(Task.classes['cxx']): def __init__(self,*k,**kw): Task.Task.__init__(self,*k,**kw) self.moc_done=0 def scan(self): (nodes,names)=c_preproc.scan(self) lst=[] for x in nodes: if x.name.endswith('.moc'): s=x.path_from(self.inputs[0].parent.get_bld()) if s not in names: names.append(s) else: lst.append(x) return(lst,names) def runnable_status(self): if self.moc_done: return Task.Task.runnable_status(self) else: for t in self.run_after: if not t.hasrun: return Task.ASK_LATER self.add_moc_tasks() return Task.Task.runnable_status(self) def create_moc_task(self,h_node,m_node): try: moc_cache=self.generator.bld.moc_cache except AttributeError: moc_cache=self.generator.bld.moc_cache={} try: return moc_cache[h_node] except KeyError: tsk=moc_cache[h_node]=Task.classes['moc'](env=self.env,generator=self.generator) tsk.set_inputs(h_node) tsk.set_outputs(m_node) gen=self.generator.bld.producer gen.outstanding.insert(0,tsk) gen.total+=1 return tsk def add_moc_tasks(self): node=self.inputs[0] bld=self.generator.bld try: self.signature() except KeyError: pass else: delattr(self,'cache_sig') moctasks=[] mocfiles=[] try: tmp_lst=bld.raw_deps[self.uid()] bld.raw_deps[self.uid()]=[] except KeyError: tmp_lst=[] for d in tmp_lst: if not d.endswith('.moc'): continue if d in mocfiles: Logs.error("paranoia owns") continue mocfiles.append(d) h_node=None try:ext=Options.options.qt_header_ext.split() except AttributeError:pass if not ext:ext=MOC_H base2=d[:-4] for x in[node.parent]+self.generator.includes_nodes: for e in ext: h_node=x.find_node(base2+e) if h_node: break if h_node: m_node=h_node.change_ext('.moc') break else: for k in EXT_QT4: if base2.endswith(k): for x in[node.parent]+self.generator.includes_nodes: h_node=x.find_node(base2) if h_node: break if h_node: m_node=h_node.change_ext(k+'.moc') break if not h_node: raise Errors.WafError('no header found for %r which is a moc file'%d) bld.node_deps[(self.inputs[0].parent.abspath(),m_node.name)]=h_node task=self.create_moc_task(h_node,m_node) moctasks.append(task) tmp_lst=bld.raw_deps[self.uid()]=mocfiles lst=bld.node_deps.get(self.uid(),()) for d in lst: name=d.name if name.endswith('.moc'): task=self.create_moc_task(bld.node_deps[(self.inputs[0].parent.abspath(),name)],d) moctasks.append(task) self.run_after.update(set(moctasks)) self.moc_done=1 run=Task.classes['cxx'].__dict__['run'] class trans_update(Task.Task): run_str='${QT_LUPDATE} ${SRC} -ts ${TGT}' color='BLUE' Task.update_outputs(trans_update) class XMLHandler(ContentHandler): def __init__(self): self.buf=[] self.files=[] def startElement(self,name,attrs): if name=='file': self.buf=[] def endElement(self,name): if name=='file': self.files.append(str(''.join(self.buf))) def characters(self,cars): self.buf.append(cars) @extension(*EXT_RCC) 
def create_rcc_task(self,node): rcnode=node.change_ext('_rc.cpp') rcctask=self.create_task('rcc',node,rcnode) cpptask=self.create_task('cxx',rcnode,rcnode.change_ext('.o')) try: self.compiled_tasks.append(cpptask) except AttributeError: self.compiled_tasks=[cpptask] return cpptask @extension(*EXT_UI) def create_uic_task(self,node): uictask=self.create_task('ui4',node) uictask.outputs=[self.path.find_or_declare(self.env['ui_PATTERN']%node.name[:-3])] @extension('.ts') def add_lang(self,node): self.lang=self.to_list(getattr(self,'lang',[]))+[node] @feature('qt4') @after_method('apply_link') def apply_qt4(self): if getattr(self,'lang',None): qmtasks=[] for x in self.to_list(self.lang): if isinstance(x,str): x=self.path.find_resource(x+'.ts') qmtasks.append(self.create_task('ts2qm',x,x.change_ext('.qm'))) if getattr(self,'update',None)and Options.options.trans_qt4: cxxnodes=[a.inputs[0]for a in self.compiled_tasks]+[a.inputs[0]for a in self.tasks if getattr(a,'inputs',None)and a.inputs[0].name.endswith('.ui')] for x in qmtasks: self.create_task('trans_update',cxxnodes,x.inputs) if getattr(self,'langname',None): qmnodes=[x.outputs[0]for x in qmtasks] rcnode=self.langname if isinstance(rcnode,str): rcnode=self.path.find_or_declare(rcnode+'.qrc') t=self.create_task('qm2rcc',qmnodes,rcnode) k=create_rcc_task(self,t.outputs[0]) self.link_task.inputs.append(k.outputs[0]) lst=[] for flag in self.to_list(self.env['CXXFLAGS']): if len(flag)<2:continue f=flag[0:2] if f in['-D','-I','/D','/I']: if(f[0]=='/'): lst.append('-'+flag[1:]) else: lst.append(flag) self.env.append_value('MOC_FLAGS',lst) @extension(*EXT_QT4) def cxx_hook(self,node): return self.create_compiled_task('qxx',node) class rcc(Task.Task): color='BLUE' run_str='${QT_RCC} -name ${SRC[0].name} ${SRC[0].abspath()} ${RCC_ST} -o ${TGT}' ext_out=['.h'] def scan(self): node=self.inputs[0] if not has_xml: Logs.error('no xml support was found, the rcc dependencies will be incomplete!') return([],[]) parser=make_parser() curHandler=XMLHandler() parser.setContentHandler(curHandler) fi=open(self.inputs[0].abspath(),'r') try: parser.parse(fi) finally: fi.close() nodes=[] names=[] root=self.inputs[0].parent for x in curHandler.files: nd=root.find_resource(x) if nd:nodes.append(nd) else:names.append(x) return(nodes,names) class moc(Task.Task): color='BLUE' run_str='${QT_MOC} ${MOC_FLAGS} ${MOCCPPPATH_ST:INCPATHS} ${MOCDEFINES_ST:DEFINES} ${SRC} ${MOC_ST} ${TGT}' class ui4(Task.Task): color='BLUE' run_str='${QT_UIC} ${SRC} -o ${TGT}' ext_out=['.h'] class ts2qm(Task.Task): color='BLUE' run_str='${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}' class qm2rcc(Task.Task): color='BLUE' after='ts2qm' def run(self): txt='\n'.join(['<file>%s</file>'%k.path_from(self.outputs[0].parent)for k in self.inputs]) code='<!DOCTYPE RCC><RCC version="1.0">\n<qresource>\n%s\n</qresource>\n</RCC>'%txt self.outputs[0].write(code) def configure(self): self.find_qt4_binaries() self.set_qt4_libs_to_check() self.set_qt4_defines() self.find_qt4_libraries() self.add_qt4_rpath() self.simplify_qt4_libs() @conf def find_qt4_binaries(self): env=self.env opt=Options.options qtdir=getattr(opt,'qtdir','') qtbin=getattr(opt,'qtbin','') paths=[] if qtdir: qtbin=os.path.join(qtdir,'bin') if not qtdir: qtdir=os.environ.get('QT4_ROOT','') qtbin=os.environ.get('QT4_BIN',None)or os.path.join(qtdir,'bin') if qtbin: paths=[qtbin] if not qtdir: paths=os.environ.get('PATH','').split(os.pathsep) paths.append('/usr/share/qt4/bin/') try: lst=Utils.listdir('/usr/local/Trolltech/') except OSError: 
pass else: if lst: lst.sort() lst.reverse() qtdir='/usr/local/Trolltech/%s/'%lst[0] qtbin=os.path.join(qtdir,'bin') paths.append(qtbin) cand=None prev_ver=['4','0','0'] for qmk in['qmake-qt4','qmake4','qmake']: try: qmake=self.find_program(qmk,path_list=paths) except self.errors.ConfigurationError: pass else: try: version=self.cmd_and_log([qmake,'-query','QT_VERSION']).strip() except self.errors.WafError: pass else: if version: new_ver=version.split('.') if new_ver>prev_ver: cand=qmake prev_ver=new_ver if cand: self.env.QMAKE=cand else: self.fatal('Could not find qmake for qt4') qtbin=self.cmd_and_log([self.env.QMAKE,'-query','QT_INSTALL_BINS']).strip()+os.sep def find_bin(lst,var): if var in env: return for f in lst: try: ret=self.find_program(f,path_list=paths) except self.errors.ConfigurationError: pass else: env[var]=ret break find_bin(['uic-qt3','uic3'],'QT_UIC3') find_bin(['uic-qt4','uic'],'QT_UIC') if not env['QT_UIC']: self.fatal('cannot find the uic compiler for qt4') try: uicver=self.cmd_and_log(env['QT_UIC']+" -version 2>&1").strip() except self.errors.ConfigurationError: self.fatal('this uic compiler is for qt3, add uic for qt4 to your path') uicver=uicver.replace('Qt User Interface Compiler ','').replace('User Interface Compiler for Qt','') self.msg('Checking for uic version','%s'%uicver) if uicver.find(' 3.')!=-1: self.fatal('this uic compiler is for qt3, add uic for qt4 to your path') find_bin(['moc-qt4','moc'],'QT_MOC') find_bin(['rcc-qt4','rcc'],'QT_RCC') find_bin(['lrelease-qt4','lrelease'],'QT_LRELEASE') find_bin(['lupdate-qt4','lupdate'],'QT_LUPDATE') env['UIC3_ST']='%s -o %s' env['UIC_ST']='%s -o %s' env['MOC_ST']='-o' env['ui_PATTERN']='ui_%s.h' env['QT_LRELEASE_FLAGS']=['-silent'] env.MOCCPPPATH_ST='-I%s' env.MOCDEFINES_ST='-D%s' @conf def find_qt4_libraries(self): qtlibs=getattr(Options.options,'qtlibs',None)or os.environ.get("QT4_LIBDIR",None) if not qtlibs: try: qtlibs=self.cmd_and_log([self.env.QMAKE,'-query','QT_INSTALL_LIBS']).strip() except Errors.WafError: qtdir=self.cmd_and_log([self.env.QMAKE,'-query','QT_INSTALL_PREFIX']).strip()+os.sep qtlibs=os.path.join(qtdir,'lib') self.msg('Found the Qt4 libraries in',qtlibs) qtincludes=os.environ.get("QT4_INCLUDES",None)or self.cmd_and_log([self.env.QMAKE,'-query','QT_INSTALL_HEADERS']).strip() env=self.env if not'PKG_CONFIG_PATH'in os.environ: os.environ['PKG_CONFIG_PATH']='%s:%s/pkgconfig:/usr/lib/qt4/lib/pkgconfig:/opt/qt4/lib/pkgconfig:/usr/lib/qt4/lib:/opt/qt4/lib'%(qtlibs,qtlibs) try: if os.environ.get("QT4_XCOMPILE",None): raise self.errors.ConfigurationError() self.check_cfg(atleast_pkgconfig_version='0.1') except self.errors.ConfigurationError: for i in self.qt4_vars: uselib=i.upper() if Utils.unversioned_sys_platform()=="darwin": frameworkName=i+".framework" qtDynamicLib=os.path.join(qtlibs,frameworkName,i) if os.path.exists(qtDynamicLib): env.append_unique('FRAMEWORK_'+uselib,i) self.msg('Checking for %s'%i,qtDynamicLib,'GREEN') else: self.msg('Checking for %s'%i,False,'YELLOW') env.append_unique('INCLUDES_'+uselib,os.path.join(qtlibs,frameworkName,'Headers')) elif env.DEST_OS!="win32": qtDynamicLib=os.path.join(qtlibs,"lib"+i+".so") qtStaticLib=os.path.join(qtlibs,"lib"+i+".a") if os.path.exists(qtDynamicLib): env.append_unique('LIB_'+uselib,i) self.msg('Checking for %s'%i,qtDynamicLib,'GREEN') elif os.path.exists(qtStaticLib): env.append_unique('LIB_'+uselib,i) self.msg('Checking for %s'%i,qtStaticLib,'GREEN') else: self.msg('Checking for %s'%i,False,'YELLOW') env.append_unique('LIBPATH_'+uselib,qtlibs) 
env.append_unique('INCLUDES_'+uselib,qtincludes) env.append_unique('INCLUDES_'+uselib,os.path.join(qtincludes,i)) else: for k in("lib%s.a","lib%s4.a","%s.lib","%s4.lib"): lib=os.path.join(qtlibs,k%i) if os.path.exists(lib): env.append_unique('LIB_'+uselib,i+k[k.find("%s")+2:k.find('.')]) self.msg('Checking for %s'%i,lib,'GREEN') break else: self.msg('Checking for %s'%i,False,'YELLOW') env.append_unique('LIBPATH_'+uselib,qtlibs) env.append_unique('INCLUDES_'+uselib,qtincludes) env.append_unique('INCLUDES_'+uselib,os.path.join(qtincludes,i)) uselib=i.upper()+"_debug" for k in("lib%sd.a","lib%sd4.a","%sd.lib","%sd4.lib"): lib=os.path.join(qtlibs,k%i) if os.path.exists(lib): env.append_unique('LIB_'+uselib,i+k[k.find("%s")+2:k.find('.')]) self.msg('Checking for %s'%i,lib,'GREEN') break else: self.msg('Checking for %s'%i,False,'YELLOW') env.append_unique('LIBPATH_'+uselib,qtlibs) env.append_unique('INCLUDES_'+uselib,qtincludes) env.append_unique('INCLUDES_'+uselib,os.path.join(qtincludes,i)) else: for i in self.qt4_vars_debug+self.qt4_vars: self.check_cfg(package=i,args='--cflags --libs',mandatory=False) @conf def simplify_qt4_libs(self): env=self.env def process_lib(vars_,coreval): for d in vars_: var=d.upper() if var=='QTCORE': continue value=env['LIBPATH_'+var] if value: core=env[coreval] accu=[] for lib in value: if lib in core: continue accu.append(lib) env['LIBPATH_'+var]=accu process_lib(self.qt4_vars,'LIBPATH_QTCORE') process_lib(self.qt4_vars_debug,'LIBPATH_QTCORE_DEBUG') @conf def add_qt4_rpath(self): env=self.env if getattr(Options.options,'want_rpath',False): def process_rpath(vars_,coreval): for d in vars_: var=d.upper() value=env['LIBPATH_'+var] if value: core=env[coreval] accu=[] for lib in value: if var!='QTCORE': if lib in core: continue accu.append('-Wl,--rpath='+lib) env['RPATH_'+var]=accu process_rpath(self.qt4_vars,'LIBPATH_QTCORE') process_rpath(self.qt4_vars_debug,'LIBPATH_QTCORE_DEBUG') @conf def set_qt4_libs_to_check(self): if not hasattr(self,'qt4_vars'): self.qt4_vars=QT4_LIBS self.qt4_vars=Utils.to_list(self.qt4_vars) if not hasattr(self,'qt4_vars_debug'): self.qt4_vars_debug=[a+'_debug'for a in self.qt4_vars] self.qt4_vars_debug=Utils.to_list(self.qt4_vars_debug) @conf def set_qt4_defines(self): if sys.platform!='win32': return for x in self.qt4_vars: y=x[2:].upper() self.env.append_unique('DEFINES_%s'%x.upper(),'QT_%s_LIB'%y) self.env.append_unique('DEFINES_%s_DEBUG'%x.upper(),'QT_%s_LIB'%y) def options(opt): opt.add_option('--want-rpath',action='store_true',default=False,dest='want_rpath',help='enable the rpath for qt libraries') opt.add_option('--header-ext',type='string',default='',help='header extension for moc files',dest='qt_header_ext') for i in'qtdir qtbin qtlibs'.split(): opt.add_option('--'+i,type='string',default='',dest=i) opt.add_option('--translate',action="store_true",help="collect translation strings",dest="trans_qt4",default=False)
[]
[]
[ "QT4_XCOMPILE", "QT4_INCLUDES", "QT4_LIBDIR", "QT4_BIN", "PKG_CONFIG_PATH", "QT4_ROOT", "PATH" ]
[]
["QT4_XCOMPILE", "QT4_INCLUDES", "QT4_LIBDIR", "QT4_BIN", "PKG_CONFIG_PATH", "QT4_ROOT", "PATH"]
python
7
0
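For context, the qt4 waf tool above is normally consumed from a project wscript. The snippet below is an assumed, typical waf usage pattern (the source files and target name are hypothetical and not taken from this repository):

def options(opt):
    opt.load('compiler_cxx qt4')

def configure(conf):
    conf.load('compiler_cxx qt4')

def build(bld):
    bld(
        features='qt4 cxx cxxprogram',  # moc/uic/rcc handling plus a C++ program link
        source='main.cpp resources.qrc',
        target='demo_app',
        use='QTCORE QTGUI',
    )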
daemon/daemon_unix.go
//go:build linux || freebsd // +build linux freebsd package daemon // import "github.com/docker/docker/daemon" import ( "bufio" "context" "fmt" "net" "os" "path/filepath" "runtime" "runtime/debug" "strconv" "strings" "sync" "time" "github.com/containerd/cgroups" statsV1 "github.com/containerd/cgroups/stats/v1" statsV2 "github.com/containerd/cgroups/v2/stats" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/blkiodev" pblkiodev "github.com/docker/docker/api/types/blkiodev" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/initlayer" "github.com/docker/docker/errdefs" "github.com/docker/docker/libcontainerd/remote" "github.com/docker/docker/libnetwork" nwconfig "github.com/docker/docker/libnetwork/config" "github.com/docker/docker/libnetwork/drivers/bridge" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/netutils" "github.com/docker/docker/libnetwork/options" lntypes "github.com/docker/docker/libnetwork/types" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/runconfig" volumemounts "github.com/docker/docker/volume/mounts" "github.com/moby/sys/mount" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" "golang.org/x/sys/unix" ) const ( isWindows = false // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 linuxMinCPUShares = 2 linuxMaxCPUShares = 262144 platformSupported = true // It's not kernel limit, we want this 6M limit to account for overhead during startup, and to supply a reasonable functional container linuxMinMemory = 6291456 // constants for remapped root settings defaultIDSpecifier = "default" defaultRemappedID = "dockremap" // constant for cgroup drivers cgroupFsDriver = "cgroupfs" cgroupSystemdDriver = "systemd" cgroupNoneDriver = "none" ) type containerGetter interface { GetContainer(string) (*container.Container, error) } func getMemoryResources(config containertypes.Resources) *specs.LinuxMemory { memory := specs.LinuxMemory{} if config.Memory > 0 { memory.Limit = &config.Memory } if config.MemoryReservation > 0 { memory.Reservation = &config.MemoryReservation } if config.MemorySwap > 0 { memory.Swap = &config.MemorySwap } if config.MemorySwappiness != nil { swappiness := uint64(*config.MemorySwappiness) memory.Swappiness = &swappiness } if config.OomKillDisable != nil { memory.DisableOOMKiller = config.OomKillDisable } if config.KernelMemory != 0 { memory.Kernel = &config.KernelMemory } if config.KernelMemoryTCP != 0 { memory.KernelTCP = &config.KernelMemoryTCP } return &memory } func getPidsLimit(config containertypes.Resources) *specs.LinuxPids { if config.PidsLimit == nil { return nil } if *config.PidsLimit <= 0 { // docker API allows 0 and negative values to unset this to be consistent // with default values. When updating values, runc requires -1 to unset // the previous limit. 
return &specs.LinuxPids{Limit: -1} } return &specs.LinuxPids{Limit: *config.PidsLimit} } func getCPUResources(config containertypes.Resources) (*specs.LinuxCPU, error) { cpu := specs.LinuxCPU{} if config.CPUShares < 0 { return nil, fmt.Errorf("shares: invalid argument") } if config.CPUShares >= 0 { shares := uint64(config.CPUShares) cpu.Shares = &shares } if config.CpusetCpus != "" { cpu.Cpus = config.CpusetCpus } if config.CpusetMems != "" { cpu.Mems = config.CpusetMems } if config.NanoCPUs > 0 { // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt period := uint64(100 * time.Millisecond / time.Microsecond) quota := config.NanoCPUs * int64(period) / 1e9 cpu.Period = &period cpu.Quota = &quota } if config.CPUPeriod != 0 { period := uint64(config.CPUPeriod) cpu.Period = &period } if config.CPUQuota != 0 { q := config.CPUQuota cpu.Quota = &q } if config.CPURealtimePeriod != 0 { period := uint64(config.CPURealtimePeriod) cpu.RealtimePeriod = &period } if config.CPURealtimeRuntime != 0 { c := config.CPURealtimeRuntime cpu.RealtimeRuntime = &c } return &cpu, nil } func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeightDevice, error) { var stat unix.Stat_t var blkioWeightDevices []specs.LinuxWeightDevice for _, weightDevice := range config.BlkioWeightDevice { if err := unix.Stat(weightDevice.Path, &stat); err != nil { return nil, errors.WithStack(&os.PathError{Op: "stat", Path: weightDevice.Path, Err: err}) } weight := weightDevice.Weight d := specs.LinuxWeightDevice{Weight: &weight} // The type is 32bit on mips. d.Major = int64(unix.Major(uint64(stat.Rdev))) //nolint: unconvert d.Minor = int64(unix.Minor(uint64(stat.Rdev))) //nolint: unconvert blkioWeightDevices = append(blkioWeightDevices, d) } return blkioWeightDevices, nil } func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { container.NoNewPrivileges = daemon.configStore.NoNewPrivileges return parseSecurityOpt(container, hostConfig) } func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { var ( labelOpts []string err error ) for _, opt := range config.SecurityOpt { if opt == "no-new-privileges" { container.NoNewPrivileges = true continue } if opt == "disable" { labelOpts = append(labelOpts, "disable") continue } var con []string if strings.Contains(opt, "=") { con = strings.SplitN(opt, "=", 2) } else if strings.Contains(opt, ":") { con = strings.SplitN(opt, ":", 2) logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.") } if len(con) != 2 { return fmt.Errorf("invalid --security-opt 1: %q", opt) } switch con[0] { case "label": labelOpts = append(labelOpts, con[1]) case "apparmor": container.AppArmorProfile = con[1] case "seccomp": container.SeccompProfile = con[1] case "no-new-privileges": noNewPrivileges, err := strconv.ParseBool(con[1]) if err != nil { return fmt.Errorf("invalid --security-opt 2: %q", opt) } container.NoNewPrivileges = noNewPrivileges default: return fmt.Errorf("invalid --security-opt 2: %q", opt) } } container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) return err } func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.LinuxThrottleDevice, error) { var throttleDevices []specs.LinuxThrottleDevice var stat unix.Stat_t for _, d := range devs { if err := unix.Stat(d.Path, &stat); err != nil { return nil, errors.WithStack(&os.PathError{Op: "stat", Path: d.Path, 
Err: err}) } d := specs.LinuxThrottleDevice{Rate: d.Rate} // the type is 32bit on mips d.Major = int64(unix.Major(uint64(stat.Rdev))) //nolint: unconvert d.Minor = int64(unix.Minor(uint64(stat.Rdev))) //nolint: unconvert throttleDevices = append(throttleDevices, d) } return throttleDevices, nil } // adjustParallelLimit takes a number of objects and a proposed limit and // figures out if it's reasonable (and adjusts it accordingly). This is only // used for daemon startup, which does a lot of parallel loading of containers // (and if we exceed RLIMIT_NOFILE then we're in trouble). func adjustParallelLimit(n int, limit int) int { // Rule-of-thumb overhead factor (how many files will each goroutine open // simultaneously). Yes, this is ugly but to be frank this whole thing is // ugly. const overhead = 2 // On Linux, we need to ensure that parallelStartupJobs doesn't cause us to // exceed RLIMIT_NOFILE. If parallelStartupJobs is too large, we reduce it // and give a warning (since in theory the user should increase their // ulimits to the largest possible value for dockerd). var rlim unix.Rlimit if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err != nil { logrus.Warnf("Couldn't find dockerd's RLIMIT_NOFILE to double-check startup parallelism factor: %v", err) return limit } softRlimit := int(rlim.Cur) // Much fewer containers than RLIMIT_NOFILE. No need to adjust anything. if softRlimit > overhead*n { return limit } // RLIMIT_NOFILE big enough, no need to adjust anything. if softRlimit > overhead*limit { return limit } logrus.Warnf("Found dockerd's open file ulimit (%v) is far too small -- consider increasing it significantly (at least %v)", softRlimit, overhead*limit) return softRlimit / overhead } // adaptContainerSettings is called during container creation to modify any // settings necessary in the HostConfig structure. func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { if adjustCPUShares && hostConfig.CPUShares > 0 { // Handle unsupported CPUShares if hostConfig.CPUShares < linuxMinCPUShares { logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) hostConfig.CPUShares = linuxMinCPUShares } else if hostConfig.CPUShares > linuxMaxCPUShares { logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) hostConfig.CPUShares = linuxMaxCPUShares } } if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { // By default, MemorySwap is set to twice the size of Memory. 
hostConfig.MemorySwap = hostConfig.Memory * 2 } if hostConfig.ShmSize == 0 { hostConfig.ShmSize = config.DefaultShmSize if daemon.configStore != nil { hostConfig.ShmSize = int64(daemon.configStore.ShmSize) } } // Set default IPC mode, if unset for container if hostConfig.IpcMode.IsEmpty() { m := config.DefaultIpcMode if daemon.configStore != nil { m = containertypes.IpcMode(daemon.configStore.IpcMode) } hostConfig.IpcMode = m } // Set default cgroup namespace mode, if unset for container if hostConfig.CgroupnsMode.IsEmpty() { // for cgroup v2: unshare cgroupns even for privileged containers // https://github.com/containers/libpod/pull/4374#issuecomment-549776387 if hostConfig.Privileged && cgroups.Mode() != cgroups.Unified { hostConfig.CgroupnsMode = containertypes.CgroupnsModeHost } else { m := containertypes.CgroupnsModeHost if cgroups.Mode() == cgroups.Unified { m = containertypes.CgroupnsModePrivate } if daemon.configStore != nil { m = containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode) } hostConfig.CgroupnsMode = m } } adaptSharedNamespaceContainer(daemon, hostConfig) var err error secOpts, err := daemon.generateSecurityOpt(hostConfig) if err != nil { return err } hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, secOpts...) if hostConfig.OomKillDisable == nil { defaultOomKillDisable := false hostConfig.OomKillDisable = &defaultOomKillDisable } return nil } // adaptSharedNamespaceContainer replaces container name with its ID in hostConfig. // To be more precisely, it modifies `container:name` to `container:ID` of PidMode, IpcMode // and NetworkMode. // // When a container shares its namespace with another container, use ID can keep the namespace // sharing connection between the two containers even the another container is renamed. func adaptSharedNamespaceContainer(daemon containerGetter, hostConfig *containertypes.HostConfig) { containerPrefix := "container:" if hostConfig.PidMode.IsContainer() { pidContainer := hostConfig.PidMode.Container() // if there is any error returned here, we just ignore it and leave it to be // handled in the following logic if c, err := daemon.GetContainer(pidContainer); err == nil { hostConfig.PidMode = containertypes.PidMode(containerPrefix + c.ID) } } if hostConfig.IpcMode.IsContainer() { ipcContainer := hostConfig.IpcMode.Container() if c, err := daemon.GetContainer(ipcContainer); err == nil { hostConfig.IpcMode = containertypes.IpcMode(containerPrefix + c.ID) } } if hostConfig.NetworkMode.IsContainer() { netContainer := hostConfig.NetworkMode.ConnectedContainer() if c, err := daemon.GetContainer(netContainer); err == nil { hostConfig.NetworkMode = containertypes.NetworkMode(containerPrefix + c.ID) } } } // verifyPlatformContainerResources performs platform-specific validation of the container's resource-configuration func verifyPlatformContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) (warnings []string, err error) { fixMemorySwappiness(resources) // memory subsystem checks and adjustments if resources.Memory != 0 && resources.Memory < linuxMinMemory { return warnings, fmt.Errorf("Minimum memory limit allowed is 6MB") } if resources.Memory > 0 && !sysInfo.MemoryLimit { warnings = append(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. 
Limitation discarded.") resources.Memory = 0 resources.MemorySwap = -1 } if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit { warnings = append(warnings, "Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.") resources.MemorySwap = -1 } if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory { return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage") } if resources.Memory == 0 && resources.MemorySwap > 0 && !update { return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage") } if resources.MemorySwappiness != nil && !sysInfo.MemorySwappiness { warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.") resources.MemorySwappiness = nil } if resources.MemorySwappiness != nil { swappiness := *resources.MemorySwappiness if swappiness < 0 || swappiness > 100 { return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness) } } if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { warnings = append(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") resources.MemoryReservation = 0 } if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory { return warnings, fmt.Errorf("Minimum memory reservation allowed is 6MB") } if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation { return warnings, fmt.Errorf("Minimum memory limit can not be less than memory reservation limit, see usage") } if resources.KernelMemory > 0 { // Kernel memory limit is not supported on cgroup v2. // Even on cgroup v1, kernel memory limit (`kmem.limit_in_bytes`) has been deprecated since kernel 5.4. // https://github.com/torvalds/linux/commit/0158115f702b0ba208ab0b5adf44cae99b3ebcc7 if !sysInfo.KernelMemory { warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") resources.KernelMemory = 0 } if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory { return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 6MB") } if !kernel.CheckKernelVersion(4, 0, 0) { warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") } } if resources.OomKillDisable != nil && !sysInfo.OomKillDisable { // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point // warning the caller if they already wanted the feature to be off if *resources.OomKillDisable { warnings = append(warnings, "Your kernel does not support OomKillDisable. OomKillDisable discarded.") } resources.OomKillDisable = nil } if resources.OomKillDisable != nil && *resources.OomKillDisable && resources.Memory == 0 { warnings = append(warnings, "OOM killer is disabled for the container, but no memory limit is set, this can result in the system running out of resources.") } if resources.PidsLimit != nil && !sysInfo.PidsLimit { if *resources.PidsLimit > 0 { warnings = append(warnings, "Your kernel does not support PIDs limit capabilities or the cgroup is not mounted. 
PIDs limit discarded.") } resources.PidsLimit = nil } // cpu subsystem checks and adjustments if resources.NanoCPUs > 0 && resources.CPUPeriod > 0 { return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Period cannot both be set") } if resources.NanoCPUs > 0 && resources.CPUQuota > 0 { return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Quota cannot both be set") } if resources.NanoCPUs > 0 && !sysInfo.CPUCfs { return warnings, fmt.Errorf("NanoCPUs can not be set, as your kernel does not support CPU CFS scheduler or the cgroup is not mounted") } // The highest precision we could get on Linux is 0.001, by setting // cpu.cfs_period_us=1000ms // cpu.cfs_quota=1ms // See the following link for details: // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt // Here we don't set the lower limit and it is up to the underlying platform (e.g., Linux) to return an error. // The error message is 0.01 so that this is consistent with Windows if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { return warnings, fmt.Errorf("Range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) } if resources.CPUShares > 0 && !sysInfo.CPUShares { warnings = append(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") resources.CPUShares = 0 } if (resources.CPUPeriod != 0 || resources.CPUQuota != 0) && !sysInfo.CPUCfs { warnings = append(warnings, "Your kernel does not support CPU CFS scheduler. CPU period/quota discarded.") resources.CPUPeriod = 0 resources.CPUQuota = 0 } if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) { return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)") } if resources.CPUQuota > 0 && resources.CPUQuota < 1000 { return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 1000)") } if resources.CPUPercent > 0 { warnings = append(warnings, fmt.Sprintf("%s does not support CPU percent. Percent discarded.", runtime.GOOS)) resources.CPUPercent = 0 } // cpuset subsystem checks and adjustments if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset { warnings = append(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. Cpuset discarded.") resources.CpusetCpus = "" resources.CpusetMems = "" } cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus) if err != nil { return warnings, errors.Wrapf(err, "Invalid value %s for cpuset cpus", resources.CpusetCpus) } if !cpusAvailable { return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus) } memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems) if err != nil { return warnings, errors.Wrapf(err, "Invalid value %s for cpuset mems", resources.CpusetMems) } if !memsAvailable { return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems) } // blkio subsystem checks and adjustments if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { warnings = append(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. 
Weight discarded.") resources.BlkioWeight = 0 } if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) { return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000") } if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 { return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS) } if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { warnings = append(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} } if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { warnings = append(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded.") resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.") resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { warnings = append(warnings, "Your kernel does not support IOPS Block write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.") resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} } return warnings, nil } func (daemon *Daemon) getCgroupDriver() string { if UsingSystemd(daemon.configStore) { return cgroupSystemdDriver } if daemon.Rootless() { return cgroupNoneDriver } return cgroupFsDriver } // getCD gets the raw value of the native.cgroupdriver option, if set. func getCD(config *config.Config) string { for _, option := range config.ExecOptions { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { continue } return val } return "" } // verifyCgroupDriver validates native.cgroupdriver func verifyCgroupDriver(config *config.Config) error { cd := getCD(config) if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver { return nil } if cd == cgroupNoneDriver { return fmt.Errorf("native.cgroupdriver option %s is internally used and cannot be specified manually", cd) } return fmt.Errorf("native.cgroupdriver option %s not supported", cd) } // UsingSystemd returns true if cli option includes native.cgroupdriver=systemd func UsingSystemd(config *config.Config) bool { cd := getCD(config) if cd == cgroupSystemdDriver { return true } // On cgroup v2 hosts, default to systemd driver if cd == "" && cgroups.Mode() == cgroups.Unified && isRunningSystemd() { return true } return false } var ( runningSystemd bool detectSystemd sync.Once ) // isRunningSystemd checks whether the host was booted with systemd as its init // system. This functions similarly to systemd's `sd_booted(3)`: internally, it // checks whether /run/systemd/system/ exists and is a directory. 
// http://www.freedesktop.org/software/systemd/man/sd_booted.html // // NOTE: This function comes from package github.com/coreos/go-systemd/util // It was borrowed here to avoid a dependency on cgo. func isRunningSystemd() bool { detectSystemd.Do(func() { fi, err := os.Lstat("/run/systemd/system") if err != nil { return } runningSystemd = fi.IsDir() }) return runningSystemd } // verifyPlatformContainerSettings performs platform-specific validation of the // hostconfig and config structures. func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) { if hostConfig == nil { return nil, nil } sysInfo := daemon.RawSysInfo() w, err := verifyPlatformContainerResources(&hostConfig.Resources, sysInfo, update) // no matter err is nil or not, w could have data in itself. warnings = append(warnings, w...) if err != nil { return warnings, err } if !hostConfig.IpcMode.Valid() { return warnings, errors.Errorf("invalid IPC mode: %v", hostConfig.IpcMode) } if !hostConfig.PidMode.Valid() { return warnings, errors.Errorf("invalid PID mode: %v", hostConfig.PidMode) } if hostConfig.ShmSize < 0 { return warnings, fmt.Errorf("SHM size can not be less than 0") } if !hostConfig.UTSMode.Valid() { return warnings, errors.Errorf("invalid UTS mode: %v", hostConfig.UTSMode) } if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj) } // ip-forwarding does not affect container with '--net=host' (or '--net=none') if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) { warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") } if hostConfig.NetworkMode.IsHost() && len(hostConfig.PortBindings) > 0 { warnings = append(warnings, "Published ports are discarded when using host network mode") } // check for various conflicting options with user namespaces if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() { if hostConfig.Privileged { return warnings, fmt.Errorf("privileged mode is incompatible with user namespaces. 
You must run the container in the host namespace when running privileged mode") } if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() { return warnings, fmt.Errorf("cannot share the host's network namespace when user namespaces are enabled") } if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() { return warnings, fmt.Errorf("cannot share the host PID namespace when user namespaces are enabled") } } if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) { // CgroupParent for systemd cgroup should be named as "xxx.slice" if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") } } if hostConfig.Runtime == "" { hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() } if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil { return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime) } parser := volumemounts.NewParser() for dest := range hostConfig.Tmpfs { if err := parser.ValidateTmpfsMountDestination(dest); err != nil { return warnings, err } } if !hostConfig.CgroupnsMode.Valid() { return warnings, fmt.Errorf("invalid cgroup namespace mode: %v", hostConfig.CgroupnsMode) } if hostConfig.CgroupnsMode.IsPrivate() { if !sysInfo.CgroupNamespaces { warnings = append(warnings, "Your kernel does not support cgroup namespaces. Cgroup namespace setting discarded.") } } if hostConfig.Runtime == config.LinuxV1RuntimeName || (hostConfig.Runtime == "" && daemon.configStore.DefaultRuntime == config.LinuxV1RuntimeName) { warnings = append(warnings, fmt.Sprintf("Configured runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName)) } return warnings, nil } // verifyDaemonSettings performs validation of daemon config struct func verifyDaemonSettings(conf *config.Config) error { if conf.ContainerdNamespace == conf.ContainerdPluginNamespace { return errors.New("containers namespace and plugins namespace cannot be the same") } // Check for mutually incompatible config options if conf.BridgeConfig.Iface != "" && conf.BridgeConfig.IP != "" { return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one") } if !conf.BridgeConfig.EnableIPTables && !conf.BridgeConfig.InterContainerCommunication { return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. 
Please set --icc or --iptables to true") } if conf.BridgeConfig.EnableIP6Tables && !conf.Experimental { return fmt.Errorf("ip6tables rules are only available if experimental features are enabled") } if !conf.BridgeConfig.EnableIPTables && conf.BridgeConfig.EnableIPMasq { conf.BridgeConfig.EnableIPMasq = false } if err := verifyCgroupDriver(conf); err != nil { return err } if conf.CgroupParent != "" && UsingSystemd(conf) { if len(conf.CgroupParent) <= 6 || !strings.HasSuffix(conf.CgroupParent, ".slice") { return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") } } if conf.Rootless && UsingSystemd(conf) && cgroups.Mode() != cgroups.Unified { return fmt.Errorf("exec-opt native.cgroupdriver=systemd requires cgroup v2 for rootless mode") } configureRuntimes(conf) if rtName := conf.GetDefaultRuntimeName(); rtName != "" { if conf.GetRuntime(rtName) == nil { return fmt.Errorf("specified default runtime '%s' does not exist", rtName) } if rtName == config.LinuxV1RuntimeName { logrus.Warnf("Configured default runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName) } } return nil } // checkSystem validates platform-specific requirements func checkSystem() error { return nil } // configureMaxThreads sets the Go runtime max threads threshold // which is 90% of the kernel setting from /proc/sys/kernel/threads-max func configureMaxThreads(config *config.Config) error { mt, err := os.ReadFile("/proc/sys/kernel/threads-max") if err != nil { return err } mtint, err := strconv.Atoi(strings.TrimSpace(string(mt))) if err != nil { return err } maxThreads := (mtint / 100) * 90 debug.SetMaxThreads(maxThreads) logrus.Debugf("Golang's threads limit set to %d", maxThreads) return nil } func overlaySupportsSelinux() (bool, error) { f, err := os.Open("/proc/kallsyms") if err != nil { if os.IsNotExist(err) { return false, nil } return false, err } defer f.Close() s := bufio.NewScanner(f) for s.Scan() { if strings.HasSuffix(s.Text(), " security_inode_copy_up") { return true, nil } } return false, s.Err() } // configureKernelSecuritySupport configures and validates security support for the kernel func configureKernelSecuritySupport(config *config.Config, driverName string) error { if config.EnableSelinuxSupport { if !selinux.GetEnabled() { logrus.Warn("Docker could not enable SELinux on the host system") return nil } if driverName == "overlay" || driverName == "overlay2" { // If driver is overlay or overlay2, make sure kernel // supports selinux with overlay. supported, err := overlaySupportsSelinux() if err != nil { return err } if !supported { logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverName) } } } else { selinux.SetDisabled() } return nil } // initNetworkController initializes the libnetwork controller and configures // network settings. If there's active sandboxes, configuration changes will not // take effect. func (daemon *Daemon) initNetworkController(activeSandboxes map[string]interface{}) error { netOptions, err := daemon.networkOptions(daemon.PluginStore, activeSandboxes) if err != nil { return err } daemon.netController, err = libnetwork.New(netOptions...) 
if err != nil { return fmt.Errorf("error obtaining controller instance: %v", err) } if len(activeSandboxes) > 0 { logrus.Info("there are running containers, updated network configuration will not take affect") } else if err := configureNetworking(daemon.netController, daemon.configStore); err != nil { return err } // Set HostGatewayIP to the default bridge's IP if it is empty setHostGatewayIP(daemon.netController, daemon.configStore) return nil } func configureNetworking(controller libnetwork.NetworkController, conf *config.Config) error { // Initialize default network on "null" if n, _ := controller.NetworkByName("none"); n == nil { if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil { return errors.Wrap(err, `error creating default "null" network`) } } // Initialize default network on "host" if n, _ := controller.NetworkByName("host"); n == nil { if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil { return errors.Wrap(err, `error creating default "host" network`) } } // Clear stale bridge network if n, err := controller.NetworkByName("bridge"); err == nil { if err = n.Delete(); err != nil { return errors.Wrap(err, `could not delete the default "bridge"" network`) } if len(conf.NetworkConfig.DefaultAddressPools.Value()) > 0 && !conf.LiveRestoreEnabled { removeDefaultBridgeInterface() } } if !conf.DisableBridge { // Initialize default driver "bridge" if err := initBridgeDriver(controller, conf); err != nil { return err } } else { removeDefaultBridgeInterface() } return nil } // setHostGatewayIP sets cfg.HostGatewayIP to the default bridge's IP if it is empty. func setHostGatewayIP(controller libnetwork.NetworkController, config *config.Config) { if config.HostGatewayIP != nil { return } if n, err := controller.NetworkByName("bridge"); err == nil { v4Info, v6Info := n.Info().IpamInfo() var gateway net.IP if len(v4Info) > 0 { gateway = v4Info[0].Gateway.IP } else if len(v6Info) > 0 { gateway = v6Info[0].Gateway.IP } config.HostGatewayIP = gateway } } func driverOptions(config *config.Config) nwconfig.Option { return nwconfig.OptionDriverConfig("bridge", options.Generic{ netlabel.GenericData: options.Generic{ "EnableIPForwarding": config.BridgeConfig.EnableIPForward, "EnableIPTables": config.BridgeConfig.EnableIPTables, "EnableIP6Tables": config.BridgeConfig.EnableIP6Tables, "EnableUserlandProxy": config.BridgeConfig.EnableUserlandProxy, "UserlandProxyPath": config.BridgeConfig.UserlandProxyPath, }, }) } func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { bridgeName := bridge.DefaultBridgeName if config.BridgeConfig.Iface != "" { bridgeName = config.BridgeConfig.Iface } netOption := map[string]string{ bridge.BridgeName: bridgeName, bridge.DefaultBridge: strconv.FormatBool(true), netlabel.DriverMTU: strconv.Itoa(config.Mtu), bridge.EnableIPMasquerade: strconv.FormatBool(config.BridgeConfig.EnableIPMasq), bridge.EnableICC: strconv.FormatBool(config.BridgeConfig.InterContainerCommunication), } // --ip processing if config.BridgeConfig.DefaultIP != nil { netOption[bridge.DefaultBindingIP] = config.BridgeConfig.DefaultIP.String() } ipamV4Conf := &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName) if err != nil { return errors.Wrap(err, "list bridge addresses failed") } nw := nwList[0] if len(nwList) > 1 && config.BridgeConfig.FixedCIDR != "" { _, fCIDR, err := 
net.ParseCIDR(config.BridgeConfig.FixedCIDR) if err != nil { return errors.Wrap(err, "parse CIDR failed") } // Iterate through in case there are multiple addresses for the bridge for _, entry := range nwList { if fCIDR.Contains(entry.IP) { nw = entry break } } } ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String() hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask) if hip.IsGlobalUnicast() { ipamV4Conf.Gateway = nw.IP.String() } if config.BridgeConfig.IP != "" { ip, ipNet, err := net.ParseCIDR(config.BridgeConfig.IP) if err != nil { return err } ipamV4Conf.PreferredPool = ipNet.String() ipamV4Conf.Gateway = ip.String() } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) } if config.BridgeConfig.FixedCIDR != "" { _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR) if err != nil { return err } ipamV4Conf.SubPool = fCIDR.String() } if config.BridgeConfig.DefaultGatewayIPv4 != nil { ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.BridgeConfig.DefaultGatewayIPv4.String() } var ( deferIPv6Alloc bool ipamV6Conf *libnetwork.IpamConf ) if config.BridgeConfig.EnableIPv6 && config.BridgeConfig.FixedCIDRv6 == "" { return errdefs.InvalidParameter(errors.New("IPv6 is enabled for the default bridge, but no subnet is configured. Specify an IPv6 subnet using --fixed-cidr-v6")) } else if config.BridgeConfig.FixedCIDRv6 != "" { _, fCIDRv6, err := net.ParseCIDR(config.BridgeConfig.FixedCIDRv6) if err != nil { return err } // In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has // at least 48 host bits, we need to guarantee the current behavior where the containers' // IPv6 addresses will be constructed based on the containers' interface MAC address. // We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints // on this network until after the driver has created the endpoint and returned the // constructed address. Libnetwork will then reserve this address with the ipam driver. 
ones, _ := fCIDRv6.Mask.Size() deferIPv6Alloc = ones <= 80 ipamV6Conf = &libnetwork.IpamConf{ AuxAddresses: make(map[string]string), PreferredPool: fCIDRv6.String(), } // In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6 // address belongs to the same network, we need to inform libnetwork about it, so // that it can be reserved with IPAM and it will not be given away to somebody else for _, nw6 := range nw6List { if fCIDRv6.Contains(nw6.IP) { ipamV6Conf.Gateway = nw6.IP.String() break } } } if config.BridgeConfig.DefaultGatewayIPv6 != nil { if ipamV6Conf == nil { ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} } ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.BridgeConfig.DefaultGatewayIPv6.String() } v4Conf := []*libnetwork.IpamConf{ipamV4Conf} v6Conf := []*libnetwork.IpamConf{} if ipamV6Conf != nil { v6Conf = append(v6Conf, ipamV6Conf) } // Initialize default network on "bridge" with the same name _, err = controller.NewNetwork("bridge", "bridge", "", libnetwork.NetworkOptionEnableIPv6(config.BridgeConfig.EnableIPv6), libnetwork.NetworkOptionDriverOpts(netOption), libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc)) if err != nil { return fmt.Errorf("Error creating default \"bridge\" network: %v", err) } return nil } // Remove default bridge interface if present (--bridge=none use case) func removeDefaultBridgeInterface() { if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil { if err := netlink.LinkDel(lnk); err != nil { logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err) } } } func setupInitLayer(idMapping idtools.IdentityMapping) func(containerfs.ContainerFS) error { return func(initPath containerfs.ContainerFS) error { return initlayer.Setup(initPath, idMapping.RootPair()) } } // Parse the remapped root (user namespace) option, which can be one of: // username - valid username from /etc/passwd // username:groupname - valid username; valid groupname from /etc/group // uid - 32-bit unsigned int valid Linux UID value // uid:gid - uid value; 32-bit unsigned int Linux GID value // // If no groupname is specified, and a username is specified, an attempt // will be made to lookup a gid for that username as a groupname // // If names are used, they are verified to exist in passwd/group func parseRemappedRoot(usergrp string) (string, string, error) { var ( userID, groupID int username, groupname string ) idparts := strings.Split(usergrp, ":") if len(idparts) > 2 { return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp) } if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil { // must be a uid; take it as valid userID = int(uid) luser, err := idtools.LookupUID(userID) if err != nil { return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err) } username = luser.Name if len(idparts) == 1 { // if the uid was numeric and no gid was specified, take the uid as the gid groupID = userID lgrp, err := idtools.LookupGID(groupID) if err != nil { return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) } groupname = lgrp.Name } } else { lookupName := idparts[0] // special case: if the user specified "default", they want Docker to create or // use (after creation) the "dockremap" user/group for root remapping if lookupName == defaultIDSpecifier { lookupName = defaultRemappedID } luser, err := idtools.LookupUser(lookupName) if err != 
nil && idparts[0] != defaultIDSpecifier { // error if the name requested isn't the special "dockremap" ID return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err) } else if err != nil { // special case-- if the username == "default", then we have been asked // to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid} // ranges will be used for the user and group mappings in user namespaced containers _, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID) if err == nil { return defaultRemappedID, defaultRemappedID, nil } return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err) } username = luser.Name if len(idparts) == 1 { // we only have a string username, and no group specified; look up gid from username as group group, err := idtools.LookupGroup(lookupName) if err != nil { return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err) } groupname = group.Name } } if len(idparts) == 2 { // groupname or gid is separately specified and must be resolved // to an unsigned 32-bit gid if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil { // must be a gid, take it as valid groupID = int(gid) lgrp, err := idtools.LookupGID(groupID) if err != nil { return "", "", fmt.Errorf("Gid %d has no entry in /etc/passwd: %v", groupID, err) } groupname = lgrp.Name } else { // not a number; attempt a lookup if _, err := idtools.LookupGroup(idparts[1]); err != nil { return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err) } groupname = idparts[1] } } return username, groupname, nil } func setupRemappedRoot(config *config.Config) (idtools.IdentityMapping, error) { if runtime.GOOS != "linux" && config.RemappedRoot != "" { return idtools.IdentityMapping{}, fmt.Errorf("User namespaces are only supported on Linux") } // if the daemon was started with remapped root option, parse // the config option to the int uid,gid values if config.RemappedRoot != "" { username, groupname, err := parseRemappedRoot(config.RemappedRoot) if err != nil { return idtools.IdentityMapping{}, err } if username == "root" { // Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op // effectively logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF") return idtools.IdentityMapping{}, nil } logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s", username) // update remapped root setting now that we have resolved them to actual names config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) mappings, err := idtools.LoadIdentityMapping(username) if err != nil { return idtools.IdentityMapping{}, errors.Wrap(err, "Can't create ID mappings") } return mappings, nil } return idtools.IdentityMapping{}, nil } func setupDaemonRoot(config *config.Config, rootDir string, remappedRoot idtools.Identity) error { config.Root = rootDir // the docker root metadata directory needs to have execute permissions for all users (g+x,o+x) // so that syscalls executing as non-root, operating on subdirectories of the graph root // (e.g. mounted layers of a container) can traverse this path. // The user namespace support will create subdirectories for the remapped root host uid:gid // pair owned by that same uid:gid pair for proper write access to those needed metadata and // layer content subtrees. 
if _, err := os.Stat(rootDir); err == nil { // root current exists; verify the access bits are correct by setting them if err = os.Chmod(rootDir, 0711); err != nil { return err } } else if os.IsNotExist(err) { // no root exists yet, create it 0711 with root:root ownership if err := os.MkdirAll(rootDir, 0711); err != nil { return err } } id := idtools.Identity{UID: idtools.CurrentIdentity().UID, GID: remappedRoot.GID} // First make sure the current root dir has the correct perms. if err := idtools.MkdirAllAndChown(config.Root, 0710, id); err != nil { return errors.Wrapf(err, "could not create or set daemon root permissions: %s", config.Root) } // if user namespaces are enabled we will create a subtree underneath the specified root // with any/all specified remapped root uid/gid options on the daemon creating // a new subdirectory with ownership set to the remapped uid/gid (so as to allow // `chdir()` to work for containers namespaced to that uid/gid) if config.RemappedRoot != "" { config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", remappedRoot.UID, remappedRoot.GID)) logrus.Debugf("Creating user namespaced daemon root: %s", config.Root) // Create the root directory if it doesn't exist if err := idtools.MkdirAllAndChown(config.Root, 0710, id); err != nil { return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) } // we also need to verify that any pre-existing directories in the path to // the graphroot won't block access to remapped root--if any pre-existing directory // has strict permissions that don't allow "x", container start will fail, so // better to warn and fail now dirPath := config.Root for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { break } if !idtools.CanAccess(dirPath, remappedRoot) { return fmt.Errorf("a subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories", config.Root) } } } if err := setupDaemonRootPropagation(config); err != nil { logrus.WithError(err).WithField("dir", config.Root).Warn("Error while setting daemon root propagation, this is not generally critical but may cause some functionality to not work or fallback to less desirable behavior") } return nil } func setupDaemonRootPropagation(cfg *config.Config) error { rootParentMount, mountOptions, err := getSourceMount(cfg.Root) if err != nil { return errors.Wrap(err, "error getting daemon root's parent mount") } var cleanupOldFile bool cleanupFile := getUnmountOnShutdownPath(cfg) defer func() { if !cleanupOldFile { return } if err := os.Remove(cleanupFile); err != nil && !os.IsNotExist(err) { logrus.WithError(err).WithField("file", cleanupFile).Warn("could not clean up old root propagation unmount file") } }() if hasMountInfoOption(mountOptions, sharedPropagationOption, slavePropagationOption) { cleanupOldFile = true return nil } if err := mount.MakeShared(cfg.Root); err != nil { return errors.Wrap(err, "could not setup daemon root propagation to shared") } // check the case where this may have already been a mount to itself. // If so then the daemon only performed a remount and should not try to unmount this later. 
if rootParentMount == cfg.Root { cleanupOldFile = true return nil } if err := os.MkdirAll(filepath.Dir(cleanupFile), 0700); err != nil { return errors.Wrap(err, "error creating dir to store mount cleanup file") } if err := os.WriteFile(cleanupFile, nil, 0600); err != nil { return errors.Wrap(err, "error writing file to signal mount cleanup on shutdown") } return nil } // getUnmountOnShutdownPath generates the path to used when writing the file that signals to the daemon that on shutdown // the daemon root should be unmounted. func getUnmountOnShutdownPath(config *config.Config) string { return filepath.Join(config.ExecRoot, "unmount-on-shutdown") } // registerLinks writes the links to a file. func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() { return nil } for _, l := range hostConfig.Links { name, alias, err := opts.ParseLink(l) if err != nil { return err } child, err := daemon.GetContainer(name) if err != nil { if errdefs.IsNotFound(err) { // Trying to link to a non-existing container is not valid, and // should return an "invalid parameter" error. Returning a "not // found" error here would make the client report the container's // image could not be found (see moby/moby#39823) err = errdefs.InvalidParameter(err) } return errors.Wrapf(err, "could not get container for %s", name) } for child.HostConfig.NetworkMode.IsContainer() { parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) child, err = daemon.GetContainer(parts[1]) if err != nil { if errdefs.IsNotFound(err) { // Trying to link to a non-existing container is not valid, and // should return an "invalid parameter" error. Returning a "not // found" error here would make the client report the container's // image could not be found (see moby/moby#39823) err = errdefs.InvalidParameter(err) } return errors.Wrapf(err, "Could not get container for %s", parts[1]) } } if child.HostConfig.NetworkMode.IsHost() { return runconfig.ErrConflictHostNetworkAndLinks } if err := daemon.registerLink(container, child, alias); err != nil { return err } } // After we load all the links into the daemon // set them to nil on the hostconfig _, err := container.WriteHostConfig() return err } // conditionalMountOnStart is a platform specific helper function during the // container start to call mount. func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { return daemon.Mount(container) } // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. 
func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { return daemon.Unmount(container) } func copyBlkioEntry(entries []*statsV1.BlkIOEntry) []types.BlkioStatEntry { out := make([]types.BlkioStatEntry, len(entries)) for i, re := range entries { out[i] = types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: re.Op, Value: re.Value, } } return out } func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { if !c.IsRunning() { return nil, errNotRunning(c.ID) } cs, err := daemon.containerd.Stats(context.Background(), c.ID) if err != nil { if strings.Contains(err.Error(), "container not found") { return nil, containerNotFound(c.ID) } return nil, err } s := &types.StatsJSON{} s.Read = cs.Read stats := cs.Metrics switch t := stats.(type) { case *statsV1.Metrics: return daemon.statsV1(s, t) case *statsV2.Metrics: return daemon.statsV2(s, t) default: return nil, errors.Errorf("unexpected type of metrics %+v", t) } } func (daemon *Daemon) statsV1(s *types.StatsJSON, stats *statsV1.Metrics) (*types.StatsJSON, error) { if stats.Blkio != nil { s.BlkioStats = types.BlkioStats{ IoServiceBytesRecursive: copyBlkioEntry(stats.Blkio.IoServiceBytesRecursive), IoServicedRecursive: copyBlkioEntry(stats.Blkio.IoServicedRecursive), IoQueuedRecursive: copyBlkioEntry(stats.Blkio.IoQueuedRecursive), IoServiceTimeRecursive: copyBlkioEntry(stats.Blkio.IoServiceTimeRecursive), IoWaitTimeRecursive: copyBlkioEntry(stats.Blkio.IoWaitTimeRecursive), IoMergedRecursive: copyBlkioEntry(stats.Blkio.IoMergedRecursive), IoTimeRecursive: copyBlkioEntry(stats.Blkio.IoTimeRecursive), SectorsRecursive: copyBlkioEntry(stats.Blkio.SectorsRecursive), } } if stats.CPU != nil { s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: stats.CPU.Usage.Total, PercpuUsage: stats.CPU.Usage.PerCPU, UsageInKernelmode: stats.CPU.Usage.Kernel, UsageInUsermode: stats.CPU.Usage.User, }, ThrottlingData: types.ThrottlingData{ Periods: stats.CPU.Throttling.Periods, ThrottledPeriods: stats.CPU.Throttling.ThrottledPeriods, ThrottledTime: stats.CPU.Throttling.ThrottledTime, }, } } if stats.Memory != nil { raw := map[string]uint64{ "cache": stats.Memory.Cache, "rss": stats.Memory.RSS, "rss_huge": stats.Memory.RSSHuge, "mapped_file": stats.Memory.MappedFile, "dirty": stats.Memory.Dirty, "writeback": stats.Memory.Writeback, "pgpgin": stats.Memory.PgPgIn, "pgpgout": stats.Memory.PgPgOut, "pgfault": stats.Memory.PgFault, "pgmajfault": stats.Memory.PgMajFault, "inactive_anon": stats.Memory.InactiveAnon, "active_anon": stats.Memory.ActiveAnon, "inactive_file": stats.Memory.InactiveFile, "active_file": stats.Memory.ActiveFile, "unevictable": stats.Memory.Unevictable, "hierarchical_memory_limit": stats.Memory.HierarchicalMemoryLimit, "hierarchical_memsw_limit": stats.Memory.HierarchicalSwapLimit, "total_cache": stats.Memory.TotalCache, "total_rss": stats.Memory.TotalRSS, "total_rss_huge": stats.Memory.TotalRSSHuge, "total_mapped_file": stats.Memory.TotalMappedFile, "total_dirty": stats.Memory.TotalDirty, "total_writeback": stats.Memory.TotalWriteback, "total_pgpgin": stats.Memory.TotalPgPgIn, "total_pgpgout": stats.Memory.TotalPgPgOut, "total_pgfault": stats.Memory.TotalPgFault, "total_pgmajfault": stats.Memory.TotalPgMajFault, "total_inactive_anon": stats.Memory.TotalInactiveAnon, "total_active_anon": stats.Memory.TotalActiveAnon, "total_inactive_file": stats.Memory.TotalInactiveFile, "total_active_file": stats.Memory.TotalActiveFile, "total_unevictable": stats.Memory.TotalUnevictable, } if 
stats.Memory.Usage != nil { s.MemoryStats = types.MemoryStats{ Stats: raw, Usage: stats.Memory.Usage.Usage, MaxUsage: stats.Memory.Usage.Max, Limit: stats.Memory.Usage.Limit, Failcnt: stats.Memory.Usage.Failcnt, } } else { s.MemoryStats = types.MemoryStats{ Stats: raw, } } // if the container does not set memory limit, use the machineMemory if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 { s.MemoryStats.Limit = daemon.machineMemory } } if stats.Pids != nil { s.PidsStats = types.PidsStats{ Current: stats.Pids.Current, Limit: stats.Pids.Limit, } } return s, nil } func (daemon *Daemon) statsV2(s *types.StatsJSON, stats *statsV2.Metrics) (*types.StatsJSON, error) { if stats.Io != nil { var isbr []types.BlkioStatEntry for _, re := range stats.Io.Usage { isbr = append(isbr, types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: "read", Value: re.Rbytes, }, types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: "write", Value: re.Wbytes, }, ) } s.BlkioStats = types.BlkioStats{ IoServiceBytesRecursive: isbr, // Other fields are unsupported } } if stats.CPU != nil { s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: stats.CPU.UsageUsec * 1000, // PercpuUsage is not supported UsageInKernelmode: stats.CPU.SystemUsec * 1000, UsageInUsermode: stats.CPU.UserUsec * 1000, }, ThrottlingData: types.ThrottlingData{ Periods: stats.CPU.NrPeriods, ThrottledPeriods: stats.CPU.NrThrottled, ThrottledTime: stats.CPU.ThrottledUsec * 1000, }, } } if stats.Memory != nil { s.MemoryStats = types.MemoryStats{ // Stats is not compatible with v1 Stats: map[string]uint64{ "anon": stats.Memory.Anon, "file": stats.Memory.File, "kernel_stack": stats.Memory.KernelStack, "slab": stats.Memory.Slab, "sock": stats.Memory.Sock, "shmem": stats.Memory.Shmem, "file_mapped": stats.Memory.FileMapped, "file_dirty": stats.Memory.FileDirty, "file_writeback": stats.Memory.FileWriteback, "anon_thp": stats.Memory.AnonThp, "inactive_anon": stats.Memory.InactiveAnon, "active_anon": stats.Memory.ActiveAnon, "inactive_file": stats.Memory.InactiveFile, "active_file": stats.Memory.ActiveFile, "unevictable": stats.Memory.Unevictable, "slab_reclaimable": stats.Memory.SlabReclaimable, "slab_unreclaimable": stats.Memory.SlabUnreclaimable, "pgfault": stats.Memory.Pgfault, "pgmajfault": stats.Memory.Pgmajfault, "workingset_refault": stats.Memory.WorkingsetRefault, "workingset_activate": stats.Memory.WorkingsetActivate, "workingset_nodereclaim": stats.Memory.WorkingsetNodereclaim, "pgrefill": stats.Memory.Pgrefill, "pgscan": stats.Memory.Pgscan, "pgsteal": stats.Memory.Pgsteal, "pgactivate": stats.Memory.Pgactivate, "pgdeactivate": stats.Memory.Pgdeactivate, "pglazyfree": stats.Memory.Pglazyfree, "pglazyfreed": stats.Memory.Pglazyfreed, "thp_fault_alloc": stats.Memory.ThpFaultAlloc, "thp_collapse_alloc": stats.Memory.ThpCollapseAlloc, }, Usage: stats.Memory.Usage, // MaxUsage is not supported Limit: stats.Memory.UsageLimit, } // if the container does not set memory limit, use the machineMemory if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 { s.MemoryStats.Limit = daemon.machineMemory } if stats.MemoryEvents != nil { // Failcnt is set to the "oom" field of the "memory.events" file. 
// See https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html s.MemoryStats.Failcnt = stats.MemoryEvents.Oom } } if stats.Pids != nil { s.PidsStats = types.PidsStats{ Current: stats.Pids.Current, Limit: stats.Pids.Limit, } } return s, nil } // setDefaultIsolation determines the default isolation mode for the // daemon to run in. This is only applicable on Windows func (daemon *Daemon) setDefaultIsolation() error { return nil } // setupDaemonProcess sets various settings for the daemon's process func setupDaemonProcess(config *config.Config) error { // setup the daemons oom_score_adj if err := setupOOMScoreAdj(config.OOMScoreAdjust); err != nil { return err } if err := setMayDetachMounts(); err != nil { logrus.WithError(err).Warn("Could not set may_detach_mounts kernel parameter") } return nil } // This is used to allow removal of mountpoints that may be mounted in other // namespaces on RHEL based kernels starting from RHEL 7.4. // Without this setting, removals on these RHEL based kernels may fail with // "device or resource busy". // This setting is not available in upstream kernels as it is not configurable, // but has been in the upstream kernels since 3.15. func setMayDetachMounts() error { f, err := os.OpenFile("/proc/sys/fs/may_detach_mounts", os.O_WRONLY, 0) if err != nil { if os.IsNotExist(err) { return nil } return errors.Wrap(err, "error opening may_detach_mounts kernel config file") } defer f.Close() _, err = f.WriteString("1") if os.IsPermission(err) { // Setting may_detach_mounts does not work in an // unprivileged container. Ignore the error, but log // it if we appear not to be in that situation. if !userns.RunningInUserNS() { logrus.Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1") } return nil } return err } func setupOOMScoreAdj(score int) error { if score == 0 { return nil } f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0) if err != nil { return err } defer f.Close() stringScore := strconv.Itoa(score) _, err = f.WriteString(stringScore) if os.IsPermission(err) { // Setting oom_score_adj does not work in an // unprivileged container. Ignore the error, but log // it if we appear not to be in that situation. if !userns.RunningInUserNS() { logrus.Debugf("Permission denied writing %q to /proc/self/oom_score_adj", stringScore) } return nil } return err } func (daemon *Daemon) initCPURtController(mnt, path string) error { if path == "/" || path == "." { return nil } // Recursively create cgroup to ensure that the system and all parent cgroups have values set // for the period and runtime as this limits what the children can be set to. 
if err := daemon.initCPURtController(mnt, filepath.Dir(path)); err != nil { return err } path = filepath.Join(mnt, path) if err := os.MkdirAll(path, 0755); err != nil { return err } if err := maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil { return err } return maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path) } func maybeCreateCPURealTimeFile(configValue int64, file string, path string) error { if configValue == 0 { return nil } return os.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700) } func (daemon *Daemon) setupSeccompProfile() error { switch profile := daemon.configStore.SeccompProfile; profile { case "", config.SeccompProfileDefault: daemon.seccompProfilePath = config.SeccompProfileDefault case config.SeccompProfileUnconfined: daemon.seccompProfilePath = config.SeccompProfileUnconfined default: daemon.seccompProfilePath = profile b, err := os.ReadFile(profile) if err != nil { return fmt.Errorf("opening seccomp profile (%s) failed: %v", profile, err) } daemon.seccompProfile = b } return nil } func (daemon *Daemon) loadSysInfo() { var siOpts []sysinfo.Opt if daemon.getCgroupDriver() == cgroupSystemdDriver { if euid := os.Getenv("ROOTLESSKIT_PARENT_EUID"); euid != "" { siOpts = append(siOpts, sysinfo.WithCgroup2GroupPath("/user.slice/user-"+euid+".slice")) } } daemon.sysInfo = sysinfo.New(siOpts...) } func (daemon *Daemon) initLibcontainerd(ctx context.Context) error { var err error daemon.containerd, err = remote.NewClient( ctx, daemon.containerdCli, filepath.Join(daemon.configStore.ExecRoot, "containerd"), daemon.configStore.ContainerdNamespace, daemon, ) return err } func recursiveUnmount(target string) error { return mount.RecursiveUnmount(target) }
[ "\"ROOTLESSKIT_PARENT_EUID\"" ]
[]
[ "ROOTLESSKIT_PARENT_EUID" ]
[]
["ROOTLESSKIT_PARENT_EUID"]
go
1
0
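The environment fields above record the single os.Getenv call in this file: loadSysInfo reads ROOTLESSKIT_PARENT_EUID to build the per-user systemd slice path used as the cgroup v2 group when dockerd runs rootless with the systemd cgroup driver. The following is a minimal standalone sketch of that lookup, assuming nothing beyond what the file shows; the helper name and the printed messages are illustrative, not part of the daemon code.

package main

import (
	"fmt"
	"os"
)

// userSliceCgroupPath mirrors the lookup in loadSysInfo: when dockerd runs
// under RootlessKit, the parent EUID is exported via ROOTLESSKIT_PARENT_EUID
// and the per-user systemd slice is derived from it. An empty return value
// stands in for "no rootless parent, keep the default group path".
func userSliceCgroupPath() string {
	euid := os.Getenv("ROOTLESSKIT_PARENT_EUID")
	if euid == "" {
		return ""
	}
	return "/user.slice/user-" + euid + ".slice"
}

func main() {
	if p := userSliceCgroupPath(); p != "" {
		fmt.Println("cgroup2 group path:", p)
	} else {
		fmt.Println("not running under RootlessKit")
	}
}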
pkg/machine/store/config/config.go
package config import ( "os" "path/filepath" "github.com/rancher/norman/types/convert" "github.com/rancher/norman/types/values" "github.com/rancher/rancher/pkg/machine/store" "github.com/rancher/types/apis/management.cattle.io/v3" "github.com/rancher/types/config" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/api/errors" ) const ( configKey = "extractedConfig" defaultCattleHome = "./management-state" ) type MachineConfig struct { store *store.GenericEncryptedStore baseDir string id string cm map[string]string } func NewStore(management *config.ManagementContext) (*store.GenericEncryptedStore, error) { return store.NewGenericEncrypedStore("mc-", "", management.Core.Namespaces(""), management.K8sClient.CoreV1()) } func NewMachineConfig(store *store.GenericEncryptedStore, machine *v3.Machine) (*MachineConfig, error) { machineDir, err := buildBaseHostDir(machine.Spec.RequestedHostname) if err != nil { return nil, err } logrus.Debugf("Created machine storage directory %s", machineDir) return &MachineConfig{ store: store, id: machine.Name, baseDir: machineDir, }, nil } func (m *MachineConfig) Dir() string { return m.baseDir } func (m *MachineConfig) Cleanup() error { return os.RemoveAll(m.baseDir) } func (m *MachineConfig) Remove() error { m.Cleanup() return m.store.Remove(m.id) } func (m *MachineConfig) TLSConfig() (*TLSConfig, error) { if err := m.loadConfig(); err != nil { return nil, err } return extractTLS(m.cm[configKey]) } func (m *MachineConfig) IP() (string, error) { config, err := m.getConfig() if err != nil { return "", err } return convert.ToString(values.GetValueN(config, "Driver", "IPAddress")), nil } func (m *MachineConfig) InternalIP() (string, error) { config, err := m.getConfig() if err != nil { return "", err } return convert.ToString(values.GetValueN(config, "Driver", "PrivateIPAddress")), nil } func (m *MachineConfig) Save() error { extractedConfig, err := compressConfig(m.baseDir) if err != nil { return err } if err := m.loadConfig(); err != nil { return err } if m.cm[configKey] == extractedConfig { return nil } m.cm[configKey] = extractedConfig if err := m.store.Set(m.id, m.cm); err != nil { m.cm = nil return err } return nil } func (m *MachineConfig) Restore() error { if err := m.loadConfig(); err != nil { return err } data := m.cm[configKey] if data == "" { return nil } return extractConfig(m.baseDir, data) } func (m *MachineConfig) loadConfig() error { if m.cm != nil { return nil } cm, err := m.getConfigMap() if err != nil { return err } if cm == nil { cm = map[string]string{} } m.cm = cm return nil } func (m *MachineConfig) getConfigMap() (map[string]string, error) { configMap, err := m.store.Get(m.id) if errors.IsNotFound(err) { return nil, nil } if err != nil { return nil, err } return configMap, nil } func (m *MachineConfig) getConfig() (map[string]interface{}, error) { if err := m.loadConfig(); err != nil { return nil, err } data := m.cm[configKey] if data == "" { return nil, nil } return extractConfigJSON(data) } func buildBaseHostDir(machineName string) (string, error) { machineDir := filepath.Join(getWorkDir(), "machines", machineName) return machineDir, os.MkdirAll(machineDir, 0740) } func getWorkDir() string { workDir := os.Getenv("MACHINE_WORK_DIR") if workDir == "" { workDir = os.Getenv("CATTLE_HOME") } if workDir == "" { workDir = defaultCattleHome } return filepath.Join(workDir, "machine") }
[ "\"MACHINE_WORK_DIR\"", "\"CATTLE_HOME\"" ]
[]
[ "MACHINE_WORK_DIR", "CATTLE_HOME" ]
[]
["MACHINE_WORK_DIR", "CATTLE_HOME"]
go
2
0
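The two environment variables listed for this record come from getWorkDir, which resolves the machine state directory with a fallback chain: MACHINE_WORK_DIR wins, then CATTLE_HOME, then the built-in default, with a "machine" subdirectory always appended. A self-contained sketch of that chain, assuming only what the file itself defines (the default path constant), is shown below.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// defaultCattleHome matches the constant in the file above.
const defaultCattleHome = "./management-state"

// machineWorkDir restates getWorkDir's fallback chain:
// MACHINE_WORK_DIR, then CATTLE_HOME, then the built-in default.
func machineWorkDir() string {
	workDir := os.Getenv("MACHINE_WORK_DIR")
	if workDir == "" {
		workDir = os.Getenv("CATTLE_HOME")
	}
	if workDir == "" {
		workDir = defaultCattleHome
	}
	return filepath.Join(workDir, "machine")
}

func main() {
	fmt.Println("machine state directory:", machineWorkDir())
}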
pkg/apiserver/authentication/identityprovider/ldap_provider_test.go
/* Copyright 2020 The KubeSphere Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package identityprovider import ( "github.com/fearlesschenc/kubesphere/pkg/apiserver/authentication/oauth" "github.com/google/go-cmp/cmp" "gopkg.in/yaml.v3" "io/ioutil" "os" "testing" ) func TestNewLdapProvider(t *testing.T) { options := ` host: test.sn.mynetname.net:389 managerDN: uid=root,cn=users,dc=test,dc=sn,dc=mynetname,dc=net managerPassword: test startTLS: false userSearchBase: dc=test,dc=sn,dc=mynetname,dc=net loginAttribute: uid mailAttribute: mail ` var dynamicOptions oauth.DynamicOptions err := yaml.Unmarshal([]byte(options), &dynamicOptions) if err != nil { t.Fatal(err) } provider, err := NewLdapProvider(&dynamicOptions) if err != nil { t.Fatal(err) } got := provider.(*ldapProvider).options expected := ldapOptions{ Host: "test.sn.mynetname.net:389", StartTLS: false, InsecureSkipVerify: false, ReadTimeout: 15000, RootCA: "", RootCAData: "", ManagerDN: "uid=root,cn=users,dc=test,dc=sn,dc=mynetname,dc=net", ManagerPassword: "test", UserSearchBase: "dc=test,dc=sn,dc=mynetname,dc=net", UserSearchFilter: "", GroupSearchBase: "", GroupSearchFilter: "", UserMemberAttribute: "", GroupMemberAttribute: "", LoginAttribute: "uid", MailAttribute: "mail", DisplayNameAttribute: "", } if diff := cmp.Diff(got, expected); diff != "" { t.Errorf("%T differ (-got, +want): %s", expected, diff) } } func TestLdapProvider_Authenticate(t *testing.T) { configFile := os.Getenv("LDAP_TEST_FILE") if configFile == "" { t.Skip("Skipped") } options, err := ioutil.ReadFile(configFile) if err != nil { t.Fatal(err) } var dynamicOptions oauth.DynamicOptions if err := yaml.Unmarshal(options, &dynamicOptions); err != nil { t.Fatal(err) } provider, err := NewLdapProvider(&dynamicOptions) if err != nil { t.Fatal(err) } if _, err := provider.Authenticate("test", "test"); err != nil { t.Fatal(err) } }
[ "\"LDAP_TEST_FILE\"" ]
[]
[ "LDAP_TEST_FILE" ]
[]
["LDAP_TEST_FILE"]
go
1
0
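TestLdapProvider_Authenticate above only runs when LDAP_TEST_FILE points at a configuration file and skips otherwise. A hedged sketch of that env-gated integration test pattern in isolation (meant for a *_test.go file; the real provider call is replaced by a simple check because NewLdapProvider needs the KubeSphere packages):

package example

import (
	"io/ioutil"
	"os"
	"testing"
)

// TestLiveAuthenticateSketch is skipped unless LDAP_TEST_FILE is set,
// mirroring the gating used in the test above.
func TestLiveAuthenticateSketch(t *testing.T) {
	configFile := os.Getenv("LDAP_TEST_FILE")
	if configFile == "" {
		t.Skip("LDAP_TEST_FILE not set, skipping live LDAP test")
	}
	raw, err := ioutil.ReadFile(configFile)
	if err != nil {
		t.Fatal(err)
	}
	// The real test unmarshals raw into oauth.DynamicOptions and then
	// calls provider.Authenticate; here we only assert the file has content.
	if len(raw) == 0 {
		t.Fatal("config file is empty")
	}
}

Keeping the skip at the top of the test means go test ./... stays green on machines without live LDAP credentials.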
test/extended/router/weighted.go
package images import ( "encoding/csv" "fmt" "net/http" "os" "strconv" "strings" "time" g "github.com/onsi/ginkgo" o "github.com/onsi/gomega" kapi "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/util/wait" e2e "k8s.io/kubernetes/test/e2e/framework" exutil "github.com/openshift/origin/test/extended/util" ) var _ = g.Describe("[Conformance][networking][router] weighted openshift router", func() { defer g.GinkgoRecover() var ( configPath = exutil.FixturePath("testdata", "weighted-router.yaml") oc = exutil.NewCLI("weighted-router", exutil.KubeConfigPath()) ) g.BeforeEach(func() { image := os.Getenv("OPENSHIFT_ROUTER_IMAGE") if len(image) == 0 { g.Skip("Skipping HAProxy router tests, OPENSHIFT_ROUTER_IMAGE is unset") } err := oc.AsAdmin().Run("adm").Args("policy", "add-cluster-role-to-user", "system:router", oc.Username()).Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = oc.Run("new-app").Args("-f", configPath, "-p", "IMAGE="+image).Execute() o.Expect(err).NotTo(o.HaveOccurred()) }) g.Describe("The HAProxy router", func() { g.It("should appropriately serve a route that points to two services", func() { defer func() { // This should be done if the test fails but // for now always dump the logs. // if g.CurrentGinkgoTestDescription().Failed dumpWeightedRouterLogs(oc, g.CurrentGinkgoTestDescription().FullTestText) }() oc.SetOutputDir(exutil.TestContext.OutputDir) ns := oc.KubeFramework().Namespace.Name execPodName := exutil.CreateExecPodOrFail(oc.AdminKubeClient().Core(), ns, "execpod") defer func() { oc.AdminKubeClient().Core().Pods(ns).Delete(execPodName, kapi.NewDeleteOptions(1)) }() g.By(fmt.Sprintf("creating a weighted router from a config file %q", configPath)) var routerIP string err := wait.Poll(time.Second, changeTimeoutSeconds*time.Second, func() (bool, error) { pod, err := oc.KubeFramework().ClientSet.Core().Pods(oc.KubeFramework().Namespace.Name).Get("weighted-router") if err != nil { return false, err } if len(pod.Status.PodIP) == 0 { return false, nil } routerIP = pod.Status.PodIP return true, nil }) o.Expect(err).NotTo(o.HaveOccurred()) // router expected to listen on port 80 routerURL := fmt.Sprintf("http://%s", routerIP) g.By("waiting for the healthz endpoint to respond") healthzURI := fmt.Sprintf("http://%s:1936/healthz", routerIP) err = waitForRouterOKResponseExec(ns, execPodName, healthzURI, routerIP, changeTimeoutSeconds) o.Expect(err).NotTo(o.HaveOccurred()) host := "weighted.example.com" times := 100 g.By(fmt.Sprintf("checking that %d requests go through successfully", times)) // wait for the request to stabilize err = waitForRouterOKResponseExec(ns, execPodName, routerURL, "weighted.example.com", changeTimeoutSeconds) o.Expect(err).NotTo(o.HaveOccurred()) // all requests should now succeed err = expectRouteStatusCodeRepeatedExec(ns, execPodName, routerURL, "weighted.example.com", http.StatusOK, times) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("checking that there are two weighted backends in the router stats")) var trafficValues []string err = wait.PollImmediate(100*time.Millisecond, changeTimeoutSeconds*time.Second, func() (bool, error) { statsURL := fmt.Sprintf("http://%s:1936/;csv", routerIP) stats, err := getAuthenticatedRouteURLViaPod(ns, execPodName, statsURL, host, "admin", "password") o.Expect(err).NotTo(o.HaveOccurred()) trafficValues, err = parseStats(stats, "weightedroute", 7) o.Expect(err).NotTo(o.HaveOccurred()) return len(trafficValues) == 2, nil }) o.Expect(err).NotTo(o.HaveOccurred()) trafficEP1, err := strconv.Atoi(trafficValues[0]) 
o.Expect(err).NotTo(o.HaveOccurred()) trafficEP2, err := strconv.Atoi(trafficValues[1]) o.Expect(err).NotTo(o.HaveOccurred()) weightedRatio := float32(trafficEP1) / float32(trafficEP2) if weightedRatio < 5 && weightedRatio > 0.2 { e2e.Failf("Unexpected weighted ratio for incoming traffic: %v (%d/%d)", weightedRatio, trafficEP1, trafficEP2) } g.By(fmt.Sprintf("checking that zero weights are also respected by the router")) host = "zeroweight.example.com" err = expectRouteStatusCodeExec(ns, execPodName, routerURL, host, http.StatusServiceUnavailable) o.Expect(err).NotTo(o.HaveOccurred()) }) }) }) func parseStats(stats string, backendSubstr string, statsField int) ([]string, error) { r := csv.NewReader(strings.NewReader(stats)) records, err := r.ReadAll() if err != nil { return nil, err } fieldValues := make([]string, 0) for _, rec := range records { if strings.Contains(rec[0], backendSubstr) && !strings.Contains(rec[1], "BACKEND") { fieldValues = append(fieldValues, rec[statsField]) } } return fieldValues, nil } func dumpWeightedRouterLogs(oc *exutil.CLI, name string) { log, _ := e2e.GetPodLogs(oc.AdminKubeClient(), oc.KubeFramework().Namespace.Name, "weighted-router", "router") e2e.Logf("Weighted Router test %s logs:\n %s", name, log) }
[ "\"OPENSHIFT_ROUTER_IMAGE\"" ]
[]
[ "OPENSHIFT_ROUTER_IMAGE" ]
[]
["OPENSHIFT_ROUTER_IMAGE"]
go
1
0
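parseStats above reads HAProxy's CSV stats output and collects one column from every row that belongs to a named backend, skipping the aggregate BACKEND rows. A self-contained sketch of the same idea against a canned stats string; the column index and sample rows are made up for illustration:

package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

// statsField returns column `field` of every row whose first column
// contains backendSubstr, ignoring the aggregate BACKEND rows.
func statsField(stats, backendSubstr string, field int) ([]string, error) {
	r := csv.NewReader(strings.NewReader(stats))
	records, err := r.ReadAll()
	if err != nil {
		return nil, err
	}
	var out []string
	for _, rec := range records {
		if len(rec) <= field {
			continue
		}
		if strings.Contains(rec[0], backendSubstr) && !strings.Contains(rec[1], "BACKEND") {
			out = append(out, rec[field])
		}
	}
	return out, nil
}

func main() {
	stats := "be_weightedroute,server1,0,0,10,20,0,1500\n" +
		"be_weightedroute,server2,0,0,10,20,0,300\n" +
		"be_weightedroute,BACKEND,0,0,10,20,0,1800\n"
	values, err := statsField(stats, "weightedroute", 7)
	fmt.Println(values, err) // [1500 300] <nil>
}

The test above then converts the two collected values with strconv.Atoi and checks their ratio to verify the weighted routing split.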
providers/dns/azure/azure_test.go
package azure import ( "os" "testing" "time" "github.com/stretchr/testify/assert" ) var ( azureLiveTest bool azureClientID string azureClientSecret string azureSubscriptionID string azureTenantID string azureResourceGroup string azureDomain string ) func init() { azureClientID = os.Getenv("AZURE_CLIENT_ID") azureClientSecret = os.Getenv("AZURE_CLIENT_SECRET") azureSubscriptionID = os.Getenv("AZURE_SUBSCRIPTION_ID") azureTenantID = os.Getenv("AZURE_TENANT_ID") azureResourceGroup = os.Getenv("AZURE_RESOURCE_GROUP") azureDomain = os.Getenv("AZURE_DOMAIN") if len(azureClientID) > 0 && len(azureClientSecret) > 0 { azureLiveTest = true } } func restoreEnv() { os.Setenv("AZURE_CLIENT_ID", azureClientID) os.Setenv("AZURE_SUBSCRIPTION_ID", azureSubscriptionID) } func TestNewDNSProviderValid(t *testing.T) { if !azureLiveTest { t.Skip("skipping live test (requires credentials)") } defer restoreEnv() os.Setenv("AZURE_CLIENT_ID", "") _, err := NewDNSProviderCredentials(azureClientID, azureClientSecret, azureSubscriptionID, azureTenantID, azureResourceGroup) assert.NoError(t, err) } func TestNewDNSProviderValidEnv(t *testing.T) { if !azureLiveTest { t.Skip("skipping live test (requires credentials)") } defer restoreEnv() os.Setenv("AZURE_CLIENT_ID", "other") _, err := NewDNSProvider() assert.NoError(t, err) } func TestNewDNSProviderMissingCredErr(t *testing.T) { defer restoreEnv() os.Setenv("AZURE_SUBSCRIPTION_ID", "") _, err := NewDNSProvider() assert.EqualError(t, err, "Azure: some credentials information are missing: AZURE_CLIENT_ID,AZURE_CLIENT_SECRET,AZURE_SUBSCRIPTION_ID,AZURE_TENANT_ID,AZURE_RESOURCE_GROUP") } func TestLiveAzurePresent(t *testing.T) { if !azureLiveTest { t.Skip("skipping live test") } provider, err := NewDNSProviderCredentials(azureClientID, azureClientSecret, azureSubscriptionID, azureTenantID, azureResourceGroup) assert.NoError(t, err) err = provider.Present(azureDomain, "", "123d==") assert.NoError(t, err) } func TestLiveAzureCleanUp(t *testing.T) { if !azureLiveTest { t.Skip("skipping live test") } provider, err := NewDNSProviderCredentials(azureClientID, azureClientSecret, azureSubscriptionID, azureTenantID, azureResourceGroup) time.Sleep(time.Second * 1) assert.NoError(t, err) err = provider.CleanUp(azureDomain, "", "123d==") assert.NoError(t, err) }
[ "\"AZURE_CLIENT_ID\"", "\"AZURE_CLIENT_SECRET\"", "\"AZURE_SUBSCRIPTION_ID\"", "\"AZURE_TENANT_ID\"", "\"AZURE_RESOURCE_GROUP\"", "\"AZURE_DOMAIN\"" ]
[]
[ "AZURE_DOMAIN", "AZURE_CLIENT_ID", "AZURE_CLIENT_SECRET", "AZURE_SUBSCRIPTION_ID", "AZURE_TENANT_ID", "AZURE_RESOURCE_GROUP" ]
[]
["AZURE_DOMAIN", "AZURE_CLIENT_ID", "AZURE_CLIENT_SECRET", "AZURE_SUBSCRIPTION_ID", "AZURE_TENANT_ID", "AZURE_RESOURCE_GROUP"]
go
6
0
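restoreEnv in the Azure DNS test above puts back only AZURE_CLIENT_ID and AZURE_SUBSCRIPTION_ID after individual tests overwrite them. A more general sketch for saving and restoring a set of variables, distinguishing unset from empty (the variable names are taken from the test; on Go 1.17+ t.Setenv does this bookkeeping automatically):

package main

import (
	"fmt"
	"os"
)

// snapshot records the current value, and whether it was set at all,
// for each key.
func snapshot(keys []string) map[string]*string {
	saved := make(map[string]*string, len(keys))
	for _, k := range keys {
		if v, ok := os.LookupEnv(k); ok {
			val := v
			saved[k] = &val
		} else {
			saved[k] = nil
		}
	}
	return saved
}

// restore puts every key back to its saved state.
func restore(saved map[string]*string) {
	for k, v := range saved {
		if v == nil {
			os.Unsetenv(k)
		} else {
			os.Setenv(k, *v)
		}
	}
}

func main() {
	keys := []string{"AZURE_CLIENT_ID", "AZURE_SUBSCRIPTION_ID"}
	saved := snapshot(keys)
	os.Setenv("AZURE_CLIENT_ID", "other") // the kind of mutation the tests perform
	restore(saved)
	fmt.Println(os.Getenv("AZURE_CLIENT_ID"))
}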
confz/loaders/env_loader.py
import os from typing import Dict, Optional, Any from dotenv import dotenv_values from confz.confz_source import ConfZEnvSource from .loader import Loader class EnvLoader(Loader): """Config loader for environment variables.""" @classmethod def _transform_name(cls, name: str): return name.lower().replace("-", "_") @classmethod def _transform_remap( cls, map_in: Optional[Dict[str, str]] ) -> Optional[Dict[str, str]]: if map_in is None: return None map_out = {} for key, value in map_in.items(): map_out[cls._transform_name(key)] = value return map_out @classmethod def _check_allowance(cls, var_name: str, confz_source: ConfZEnvSource) -> bool: if not confz_source.allow_all: if confz_source.allow is None: return False allow_list = [cls._transform_name(var) for var in confz_source.allow] if var_name not in allow_list: return False if confz_source.deny is not None: deny_list = [cls._transform_name(var) for var in confz_source.deny] if var_name in deny_list: return False return True @classmethod def populate_config(cls, config: dict, confz_source: ConfZEnvSource): remap = cls._transform_remap(confz_source.remap) origin_env_vars: Dict[str, Any] = dict(os.environ) if confz_source.file is not None: origin_env_vars = {**dotenv_values(confz_source.file), **origin_env_vars} env_vars = {} for env_var in origin_env_vars: var_name = env_var if confz_source.prefix is not None: if not var_name.startswith(confz_source.prefix): continue var_name = var_name[len(confz_source.prefix) :] var_name = cls._transform_name(var_name) if not cls._check_allowance(var_name, confz_source): continue if remap is not None and var_name in remap: var_name = remap[var_name] env_vars[var_name] = origin_env_vars[env_var] env_vars = cls.transform_nested_dicts(env_vars) cls.update_dict_recursively(config, env_vars)
[]
[]
[]
[]
[]
python
0
0
vendor/github.com/elazarl/goproxy/https.go
package goproxy import ( "bufio" "crypto/tls" "errors" "io" "io/ioutil" "net" "net/http" "net/url" "os" "regexp" "strconv" "strings" "sync" "sync/atomic" ) type ConnectActionLiteral int const ( ConnectAccept = iota ConnectReject ConnectMitm ConnectHijack ConnectHTTPMitm ConnectProxyAuthHijack ) var ( OkConnect = &ConnectAction{Action: ConnectAccept, TLSConfig: TLSConfigFromCA(&GoproxyCa)} MitmConnect = &ConnectAction{Action: ConnectMitm, TLSConfig: TLSConfigFromCA(&GoproxyCa)} HTTPMitmConnect = &ConnectAction{Action: ConnectHTTPMitm, TLSConfig: TLSConfigFromCA(&GoproxyCa)} RejectConnect = &ConnectAction{Action: ConnectReject, TLSConfig: TLSConfigFromCA(&GoproxyCa)} httpsRegexp = regexp.MustCompile(`^https:\/\/`) ) type ConnectAction struct { Action ConnectActionLiteral Hijack func(req *http.Request, client net.Conn, ctx *ProxyCtx) TLSConfig func(host string, ctx *ProxyCtx) (*tls.Config, error) } func stripPort(s string) string { ix := strings.IndexRune(s, ':') if ix == -1 { return s } return s[:ix] } func (proxy *ProxyHttpServer) dial(network, addr string) (c net.Conn, err error) { if proxy.Tr.Dial != nil { return proxy.Tr.Dial(network, addr) } return net.Dial(network, addr) } func (proxy *ProxyHttpServer) connectDial(network, addr string) (c net.Conn, err error) { if proxy.ConnectDial == nil { return proxy.dial(network, addr) } return proxy.ConnectDial(network, addr) } func (proxy *ProxyHttpServer) handleHttps(w http.ResponseWriter, r *http.Request) { ctx := &ProxyCtx{Req: r, Session: atomic.AddInt64(&proxy.sess, 1), proxy: proxy} hij, ok := w.(http.Hijacker) if !ok { panic("httpserver does not support hijacking") } proxyClient, _, e := hij.Hijack() if e != nil { panic("Cannot hijack connection " + e.Error()) } ctx.Logf("Running %d CONNECT handlers", len(proxy.httpsHandlers)) todo, host := OkConnect, r.URL.Host for i, h := range proxy.httpsHandlers { newtodo, newhost := h.HandleConnect(host, ctx) // If found a result, break the loop immediately if newtodo != nil { todo, host = newtodo, newhost ctx.Logf("on %dth handler: %v %s", i, todo, host) break } } switch todo.Action { case ConnectAccept: if !hasPort.MatchString(host) { host += ":80" } targetSiteCon, err := proxy.connectDial("tcp", host) if err != nil { httpError(proxyClient, ctx, err) return } ctx.Logf("Accepting CONNECT to %s", host) proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n")) targetTCP, targetOK := targetSiteCon.(*net.TCPConn) proxyClientTCP, clientOK := proxyClient.(*net.TCPConn) if targetOK && clientOK { go copyAndClose(ctx, targetTCP, proxyClientTCP) go copyAndClose(ctx, proxyClientTCP, targetTCP) } else { go func() { var wg sync.WaitGroup wg.Add(2) go copyOrWarn(ctx, targetSiteCon, proxyClient, &wg) go copyOrWarn(ctx, proxyClient, targetSiteCon, &wg) wg.Wait() proxyClient.Close() targetSiteCon.Close() }() } case ConnectHijack: ctx.Logf("Hijacking CONNECT to %s", host) proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n")) todo.Hijack(r, proxyClient, ctx) case ConnectHTTPMitm: proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n")) ctx.Logf("Assuming CONNECT is plain HTTP tunneling, mitm proxying it") targetSiteCon, err := proxy.connectDial("tcp", host) if err != nil { ctx.Warnf("Error dialing to %s: %s", host, err.Error()) return } for { client := bufio.NewReader(proxyClient) remote := bufio.NewReader(targetSiteCon) req, err := http.ReadRequest(client) if err != nil && err != io.EOF { ctx.Warnf("cannot read request of MITM HTTP client: %+#v", err) } if err != nil { return } req, resp := proxy.filterRequest(req, ctx) if resp 
== nil { if err := req.Write(targetSiteCon); err != nil { httpError(proxyClient, ctx, err) return } resp, err = http.ReadResponse(remote, req) if err != nil { httpError(proxyClient, ctx, err) return } defer resp.Body.Close() } resp = proxy.filterResponse(resp, ctx) if err := resp.Write(proxyClient); err != nil { httpError(proxyClient, ctx, err) return } } case ConnectMitm: proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n")) ctx.Logf("Assuming CONNECT is TLS, mitm proxying it") // this goes in a separate goroutine, so that the net/http server won't think we're // still handling the request even after hijacking the connection. Those HTTP CONNECT // request can take forever, and the server will be stuck when "closed". // TODO: Allow Server.Close() mechanism to shut down this connection as nicely as possible tlsConfig := defaultTLSConfig if todo.TLSConfig != nil { var err error tlsConfig, err = todo.TLSConfig(host, ctx) if err != nil { httpError(proxyClient, ctx, err) return } } go func() { //TODO: cache connections to the remote website rawClientTls := tls.Server(proxyClient, tlsConfig) if err := rawClientTls.Handshake(); err != nil { ctx.Warnf("Cannot handshake client %v %v", r.Host, err) return } defer rawClientTls.Close() clientTlsReader := bufio.NewReader(rawClientTls) for !isEof(clientTlsReader) { req, err := http.ReadRequest(clientTlsReader) var ctx = &ProxyCtx{Req: req, Session: atomic.AddInt64(&proxy.sess, 1), proxy: proxy} if err != nil && err != io.EOF { return } if err != nil { ctx.Warnf("Cannot read TLS request from mitm'd client %v %v", r.Host, err) return } req.RemoteAddr = r.RemoteAddr // since we're converting the request, need to carry over the original connecting IP as well ctx.Logf("req %v", r.Host) if !httpsRegexp.MatchString(req.URL.String()) { req.URL, err = url.Parse("https://" + r.Host + req.URL.String()) } // Bug fix which goproxy fails to provide request // information URL in the context when does HTTPS MITM ctx.Req = req req, resp := proxy.filterRequest(req, ctx) if resp == nil { if err != nil { ctx.Warnf("Illegal URL %s", "https://"+r.Host+req.URL.Path) return } removeProxyHeaders(ctx, req) resp, err = ctx.RoundTrip(req) if err != nil { ctx.Warnf("Cannot read TLS response from mitm'd server %v", err) return } ctx.Logf("resp %v", resp.Status) } resp = proxy.filterResponse(resp, ctx) defer resp.Body.Close() text := resp.Status statusCode := strconv.Itoa(resp.StatusCode) + " " if strings.HasPrefix(text, statusCode) { text = text[len(statusCode):] } // always use 1.1 to support chunked encoding if _, err := io.WriteString(rawClientTls, "HTTP/1.1"+" "+statusCode+text+"\r\n"); err != nil { ctx.Warnf("Cannot write TLS response HTTP status from mitm'd client: %v", err) return } // Since we don't know the length of resp, return chunked encoded response // TODO: use a more reasonable scheme resp.Header.Del("Content-Length") resp.Header.Set("Transfer-Encoding", "chunked") // Force connection close otherwise chrome will keep CONNECT tunnel open forever resp.Header.Set("Connection", "close") if err := resp.Header.Write(rawClientTls); err != nil { ctx.Warnf("Cannot write TLS response header from mitm'd client: %v", err) return } if _, err = io.WriteString(rawClientTls, "\r\n"); err != nil { ctx.Warnf("Cannot write TLS response header end from mitm'd client: %v", err) return } chunked := newChunkedWriter(rawClientTls) if _, err := io.Copy(chunked, resp.Body); err != nil { ctx.Warnf("Cannot write TLS response body from mitm'd client: %v", err) return } if err := chunked.Close(); 
err != nil { ctx.Warnf("Cannot write TLS chunked EOF from mitm'd client: %v", err) return } if _, err = io.WriteString(rawClientTls, "\r\n"); err != nil { ctx.Warnf("Cannot write TLS response chunked trailer from mitm'd client: %v", err) return } } ctx.Logf("Exiting on EOF") }() case ConnectProxyAuthHijack: proxyClient.Write([]byte("HTTP/1.1 407 Proxy Authentication Required\r\n")) todo.Hijack(r, proxyClient, ctx) case ConnectReject: if ctx.Resp != nil { if err := ctx.Resp.Write(proxyClient); err != nil { ctx.Warnf("Cannot write response that reject http CONNECT: %v", err) } } proxyClient.Close() } } func httpError(w io.WriteCloser, ctx *ProxyCtx, err error) { if _, err := io.WriteString(w, "HTTP/1.1 502 Bad Gateway\r\n\r\n"); err != nil { ctx.Warnf("Error responding to client: %s", err) } if err := w.Close(); err != nil { ctx.Warnf("Error closing client connection: %s", err) } } func copyOrWarn(ctx *ProxyCtx, dst io.Writer, src io.Reader, wg *sync.WaitGroup) { if _, err := io.Copy(dst, src); err != nil { ctx.Warnf("Error copying to client: %s", err) } wg.Done() } func copyAndClose(ctx *ProxyCtx, dst, src *net.TCPConn) { if _, err := io.Copy(dst, src); err != nil { ctx.Warnf("Error copying to client: %s", err) } dst.CloseWrite() src.CloseRead() } func dialerFromEnv(proxy *ProxyHttpServer) func(network, addr string) (net.Conn, error) { https_proxy := os.Getenv("HTTPS_PROXY") if https_proxy == "" { https_proxy = os.Getenv("https_proxy") } if https_proxy == "" { return nil } return proxy.NewConnectDialToProxy(https_proxy) } func (proxy *ProxyHttpServer) NewConnectDialToProxy(https_proxy string) func(network, addr string) (net.Conn, error) { u, err := url.Parse(https_proxy) if err != nil { return nil } if u.Scheme == "" || u.Scheme == "http" { if strings.IndexRune(u.Host, ':') == -1 { u.Host += ":80" } return func(network, addr string) (net.Conn, error) { connectReq := &http.Request{ Method: "CONNECT", URL: &url.URL{Opaque: addr}, Host: addr, Header: make(http.Header), } c, err := proxy.dial(network, u.Host) if err != nil { return nil, err } connectReq.Write(c) // Read response. // Okay to use and discard buffered reader here, because // TLS server will not speak until spoken to. br := bufio.NewReader(c) resp, err := http.ReadResponse(br, connectReq) if err != nil { c.Close() return nil, err } defer resp.Body.Close() if resp.StatusCode != 200 { resp, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } c.Close() return nil, errors.New("proxy refused connection" + string(resp)) } return c, nil } } if u.Scheme == "https" { if strings.IndexRune(u.Host, ':') == -1 { u.Host += ":443" } return func(network, addr string) (net.Conn, error) { c, err := proxy.dial(network, u.Host) if err != nil { return nil, err } c = tls.Client(c, proxy.Tr.TLSClientConfig) connectReq := &http.Request{ Method: "CONNECT", URL: &url.URL{Opaque: addr}, Host: addr, Header: make(http.Header), } connectReq.Write(c) // Read response. // Okay to use and discard buffered reader here, because // TLS server will not speak until spoken to. 
br := bufio.NewReader(c) resp, err := http.ReadResponse(br, connectReq) if err != nil { c.Close() return nil, err } defer resp.Body.Close() if resp.StatusCode != 200 { body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 500)) if err != nil { return nil, err } c.Close() return nil, errors.New("proxy refused connection" + string(body)) } return c, nil } } return nil } func TLSConfigFromCA(ca *tls.Certificate) func(host string, ctx *ProxyCtx) (*tls.Config, error) { return func(host string, ctx *ProxyCtx) (*tls.Config, error) { config := *defaultTLSConfig ctx.Logf("signing for %s", stripPort(host)) cert, err := signHost(*ca, []string{stripPort(host)}) if err != nil { ctx.Warnf("Cannot sign host certificate with provided CA: %s", err) return nil, err } config.Certificates = append(config.Certificates, cert) return &config, nil } }
[ "\"HTTPS_PROXY\"", "\"https_proxy\"" ]
[]
[ "HTTPS_PROXY", "https_proxy" ]
[]
["HTTPS_PROXY", "https_proxy"]
go
2
0
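dialerFromEnv above consults HTTPS_PROXY and then the lowercase https_proxy before building a CONNECT dialer to the upstream proxy. A minimal sketch of just that lookup and URL validation; in production code net/http's ProxyFromEnvironment covers the same variables, this only shows the fallback order:

package main

import (
	"fmt"
	"net/url"
	"os"
)

// proxyFromEnv mirrors the HTTPS_PROXY / https_proxy fallback above and
// returns the parsed proxy URL, or nil when no proxy is configured.
func proxyFromEnv() (*url.URL, error) {
	raw := os.Getenv("HTTPS_PROXY")
	if raw == "" {
		raw = os.Getenv("https_proxy")
	}
	if raw == "" {
		return nil, nil
	}
	return url.Parse(raw)
}

func main() {
	u, err := proxyFromEnv()
	fmt.Println(u, err)
}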
pype/plugins/maya/load/load_reference.py
import pype.maya.plugin from avalon import api, maya from maya import cmds import os from pypeapp import config class ReferenceLoader(pype.maya.plugin.ReferenceLoader): """Load the model""" families = ["model", "pointcache", "animation", "mayaAscii", "setdress", "layout", "camera", "rig"] representations = ["ma", "abc", "fbx"] tool_names = ["loader"] label = "Reference" order = -10 icon = "code-fork" color = "orange" def process_reference(self, context, name, namespace, options): import maya.cmds as cmds from avalon import maya import pymel.core as pm try: family = context["representation"]["context"]["family"] except ValueError: family = "model" with maya.maintained_selection(): groupName = "{}:{}".format(namespace, name) cmds.loadPlugin("AbcImport.mll", quiet=True) nodes = cmds.file(self.fname, namespace=namespace, sharedReferenceFile=False, groupReference=True, groupName="{}:{}".format(namespace, name), reference=True, returnNewNodes=True) # namespace = cmds.referenceQuery(nodes[0], namespace=True) shapes = cmds.ls(nodes, shapes=True, long=True) newNodes = (list(set(nodes) - set(shapes))) current_namespace = pm.namespaceInfo(currentNamespace=True) if current_namespace != ":": groupName = current_namespace + ":" + groupName groupNode = pm.PyNode(groupName) roots = set() for node in newNodes: try: roots.add(pm.PyNode(node).getAllParents()[-2]) except: # noqa: E722 pass for root in roots: root.setParent(world=True) groupNode.zeroTransformPivots() for root in roots: root.setParent(groupNode) cmds.setAttr(groupName + ".displayHandle", 1) presets = config.get_presets(project=os.environ['AVALON_PROJECT']) colors = presets['plugins']['maya']['load']['colors'] c = colors.get(family) if c is not None: groupNode.useOutlinerColor.set(1) groupNode.outlinerColor.set(c[0], c[1], c[2]) self[:] = newNodes cmds.setAttr(groupName + ".displayHandle", 1) # get bounding box bbox = cmds.exactWorldBoundingBox(groupName) # get pivot position on world space pivot = cmds.xform(groupName, q=True, sp=True, ws=True) # center of bounding box cx = (bbox[0] + bbox[3]) / 2 cy = (bbox[1] + bbox[4]) / 2 cz = (bbox[2] + bbox[5]) / 2 # add pivot position to calculate offset cx = cx + pivot[0] cy = cy + pivot[1] cz = cz + pivot[2] # set selection handle offset to center of bounding box cmds.setAttr(groupName + ".selectHandleX", cx) cmds.setAttr(groupName + ".selectHandleY", cy) cmds.setAttr(groupName + ".selectHandleZ", cz) if family == "rig": self._post_process_rig(name, namespace, context, options) else: if "translate" in options: cmds.setAttr(groupName + ".t", *options["translate"]) return newNodes def switch(self, container, representation): self.update(container, representation) def _post_process_rig(self, name, namespace, context, options): output = next((node for node in self if node.endswith("out_SET")), None) controls = next((node for node in self if node.endswith("controls_SET")), None) assert output, "No out_SET in rig, this is a bug." assert controls, "No controls_SET in rig, this is a bug." # Find the roots amongst the loaded nodes roots = cmds.ls(self[:], assemblies=True, long=True) assert roots, "No root nodes in rig, this is a bug." 
asset = api.Session["AVALON_ASSET"] dependency = str(context["representation"]["_id"]) self.log.info("Creating subset: {}".format(namespace)) # Create the animation instance with maya.maintained_selection(): cmds.select([output, controls] + roots, noExpand=True) api.create(name=namespace, asset=asset, family="animation", options={"useSelection": True}, data={"dependencies": dependency})
[]
[]
[ "AVALON_PROJECT" ]
[]
["AVALON_PROJECT"]
python
1
0
app_config.py
#!/usr/bin/env python """ Project-wide application configuration. DO NOT STORE SECRETS, PASSWORDS, ETC. IN THIS FILE. They will be exposed to users. Use environment variables instead. See get_secrets() below for a fast way to access them. """ import os """ NAMES """ # Project name to be used in urls # Use dashes, not underscores! PROJECT_SLUG = 'aosfatos-lunchbox' # Project name to be used in file paths PROJECT_FILENAME = 'aosfatos-lunchbox' # The name of the repository containing the source REPOSITORY_NAME = 'aosfatos-lunchbox' GITHUB_USERNAME = 'voltdatalab' REPOSITORY_URL = '[email protected]:%s/%s.git' % (GITHUB_USERNAME, REPOSITORY_NAME) REPOSITORY_ALT_URL = None # '[email protected]:nprapps/%s.git' % REPOSITORY_NAME' DEV_CONTACT = 'EDIT THIS IN APP_CONFIG.PY' """ DEPLOYMENT """ PRODUCTION_S3_BUCKET = 'aosfatos-lunchbox' STAGING_S3_BUCKET = 'aosfatos-lunchbox' DEFAULT_MAX_AGE = 20 FILE_SERVER_USER = 'ubuntu' FILE_SERVER = '' FILE_SERVER_PATH = '~/www' # These variables will be set at runtime. See configure_targets() below S3_BUCKET = None S3_BASE_URL = None S3_DEPLOY_URL = None DEBUG = True """ Utilities """ def get_secrets(): """ A method for accessing our secrets. """ secrets_dict = {} for k,v in os.environ.items(): if k.startswith(PROJECT_SLUG): k = k[len(PROJECT_SLUG) + 1:] secrets_dict[k] = v return secrets_dict def configure_targets(deployment_target): """ Configure deployment targets. Abstracted so this can be overriden for rendering before deployment. """ global S3_BUCKET global S3_BASE_URL global S3_DEPLOY_URL global DEBUG global DEPLOYMENT_TARGET global ASSETS_MAX_AGE if deployment_target == 'electron': S3_BUCKET = None S3_BASE_URL = None S3_DEPLOY_URL = None DEBUG = False ASSETS_MAX_AGE = 0 if deployment_target == 'fileserver': S3_BUCKET = None S3_BASE_URL = None S3_DEPLOY_URL = None DEBUG = False ASSETS_MAX_AGE = 0 if deployment_target == 'production': S3_BUCKET = PRODUCTION_S3_BUCKET S3_BASE_URL = 'http://%s/%s' % (S3_BUCKET, PROJECT_SLUG) S3_DEPLOY_URL = 's3://%s/%s' % (S3_BUCKET, PROJECT_SLUG) DEBUG = False ASSETS_MAX_AGE = 86400 elif deployment_target == 'staging': S3_BUCKET = STAGING_S3_BUCKET S3_BASE_URL = 'http://%s/%s' % (S3_BUCKET, PROJECT_SLUG) S3_DEPLOY_URL = 's3://%s/%s' % (S3_BUCKET, PROJECT_SLUG) DEBUG = True ASSETS_MAX_AGE = 20 else: S3_BUCKET = None S3_BASE_URL = 'http://127.0.0.1:8000' S3_DEPLOY_URL = None DEBUG = True ASSETS_MAX_AGE = 20 DEPLOYMENT_TARGET = deployment_target """ Run automated configuration """ DEPLOYMENT_TARGET = os.environ.get('DEPLOYMENT_TARGET', None) configure_targets(DEPLOYMENT_TARGET)
[]
[]
[ "DEPLOYMENT_TARGET" ]
[]
["DEPLOYMENT_TARGET"]
python
1
0
py2app_tests/test_app_with_sharedlib.py
import sys if (sys.version_info[0] == 2 and sys.version_info[:2] >= (2,7)) or \ (sys.version_info[0] == 3 and sys.version_info[:2] >= (3,2)): import unittest else: import unittest2 as unittest import subprocess import shutil import time import os import signal import py2app import hashlib if __name__ == "__main__": from tools import kill_child_processes else: from .tools import kill_child_processes DIR_NAME=os.path.dirname(os.path.abspath(__file__)) class TestBasicAppWithExtension (unittest.TestCase): py2app_args = [] python_args = [] app_dir = os.path.join(DIR_NAME, 'app_with_sharedlib') # Basic setup code # # The code in this block needs to be moved to # a base-class. @classmethod def setUpClass(cls): kill_child_processes() env=os.environ.copy() pp = os.path.dirname(os.path.dirname(py2app.__file__)) env['TMPDIR'] = os.getcwd() if 'PYTHONPATH' in env: env['PYTHONPATH'] = pp + ':' + env['PYTHONPATH'] else: env['PYTHONPATH'] = pp if 'LANG' not in env: # Ensure that testing though SSH works env['LANG'] = 'en_US.UTF-8' p = subprocess.Popen([ sys.executable] + cls.python_args + [ 'setup.py', 'build_ext'], cwd = cls.app_dir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=False, env=env ) lines = p.communicate()[0] if p.wait() != 0: print (lines) raise AssertionError("Running build_ext failed") p = subprocess.Popen([ sys.executable ] + cls.python_args + [ 'setup.py', 'py2app'] + cls.py2app_args, cwd = cls.app_dir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=False, env=env ) lines = p.communicate()[0] if p.wait() != 0: print (lines) raise AssertionError("Creating basic_app bundle failed") @classmethod def tearDownClass(cls): if os.path.exists(os.path.join(cls.app_dir, 'build')): shutil.rmtree(os.path.join(cls.app_dir, 'build')) if os.path.exists(os.path.join(cls.app_dir, 'dist')): shutil.rmtree(os.path.join(cls.app_dir, 'dist')) if os.path.exists(os.path.join(cls.app_dir, 'lib')): shutil.rmtree(os.path.join(cls.app_dir, 'lib')) for fn in os.listdir(cls.app_dir): if fn.endswith('.so'): os.unlink(os.path.join(cls.app_dir, fn)) time.sleep(2) def tearDown(self): kill_child_processes() time.sleep(1) def start_app(self): # Start the test app, return a subprocess object where # stdin and stdout are connected to pipes. 
path = os.path.join( self.app_dir, 'dist/BasicApp.app/Contents/MacOS/BasicApp') p = subprocess.Popen([path], stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=False, ) #stderr=subprocess.STDOUT) return p def wait_with_timeout(self, proc, timeout=10): for i in range(timeout): x = proc.poll() if x is None: time.sleep(1) else: return x os.kill(proc.pid, signal.SIGKILL) return proc.wait() # # End of setup code # def test_basic_start(self): p = self.start_app() p.stdin.close() exit = self.wait_with_timeout(p) self.assertEqual(exit, 0) p.stdout.close() def test_extension_use(self): p = self.start_app() p.stdin.write('print(double(9))\n'.encode('latin1')) p.stdin.flush() ln = p.stdout.readline() self.assertEqual(ln.strip(), b"18") p.stdin.write('print(square(9))\n'.encode('latin1')) p.stdin.flush() ln = p.stdout.readline() self.assertEqual(ln.strip(), b"81") p.stdin.write('print(half(16))\n'.encode('latin1')) p.stdin.flush() ln = p.stdout.readline() self.assertEqual(ln.strip(), b"8") def test_simple_imports(self): p = self.start_app() # Basic module that is always present: p.stdin.write('import_module("os")\n'.encode('latin1')) p.stdin.flush() ln = p.stdout.readline() self.assertEqual(ln.strip(), b"os") # Dependency of the main module: p.stdin.write('import_module("decimal")\n'.encode('latin1')) p.stdin.flush() ln = p.stdout.readline() self.assertEqual(ln.strip(), b"decimal") can_import_stdlib = False if '--alias' in self.py2app_args: can_import_stdlib = True if '--semi-standalone' in self.py2app_args: can_import_stdlib = True if sys.prefix.startswith('/System/'): can_import_stdlib = True if not can_import_stdlib: # Not a dependency of the module (stdlib): p.stdin.write('import_module("xdrlib")\n'.encode('latin1')) p.stdin.flush() ln = p.stdout.readline().decode('utf-8') self.assertTrue(ln.strip().startswith("* import failed"), ln) else: p.stdin.write('import_module("xdrlib")\n'.encode('latin1')) p.stdin.flush() ln = p.stdout.readline() self.assertEqual(ln.strip(), b"xdrlib") if sys.prefix.startswith('/System') or '--alias' in self.py2app_args: # py2app is included as part of the system install p.stdin.write('import_module("py2app")\n'.encode('latin1')) p.stdin.flush() ln = p.stdout.readline() self.assertEqual(ln.strip(), b"py2app") else: # Not a dependency of the module (external): p.stdin.write('import_module("py2app")\n'.encode('latin1')) p.stdin.flush() ln = p.stdout.readline().decode('utf-8') self.assertTrue(ln.strip().startswith("* import failed"), ln) p.stdin.close() p.stdout.close() def test_app_structure(self): path = os.path.join(self.app_dir, 'dist/BasicApp.app') if '--alias' in self.py2app_args: self.assertFalse(os.path.exists(os.path.join(path, 'Contents', 'Frameworks', 'libshared.1.dylib'))) self.assertFalse(os.path.exists(os.path.join(path, 'Contents', 'Frameworks', 'libshared.dylib'))) self.assertFalse(os.path.exists(os.path.join(path, 'Contents', 'Frameworks', 'libhalf.dylib'))) else: self.assertTrue(os.path.isfile(os.path.join(path, 'Contents', 'Frameworks', 'libshared.1.dylib'))) self.assertTrue(os.path.islink(os.path.join(path, 'Contents', 'Frameworks', 'libshared.dylib'))) self.assertEqual(os.readlink(os.path.join(path, 'Contents', 'Frameworks', 'libshared.dylib')), 'libshared.1.dylib') self.assertTrue(os.path.isfile(os.path.join(path, 'Contents', 'Frameworks', 'libhalf.dylib'))) class TestBasicAliasAppWithExtension (TestBasicAppWithExtension): py2app_args = [ '--alias', ] class TestBasicSemiStandaloneAppWithExtension (TestBasicAppWithExtension): py2app_args = [ 
'--semi-standalone', ] if __name__ == "__main__": unittest.main()
[]
[]
[]
[]
[]
python
0
0
cli/integration_test/v2/init.go
package v2 import ( "os" "testing" "github.com/hasura/graphql-engine/cli" "github.com/hasura/graphql-engine/cli/commands" ) func TestInitCmd(t *testing.T, ec *cli.ExecutionContext, initDir string) { tt := []struct { name string opts *commands.InitOptions err error }{ {"only-init-dir", &commands.InitOptions{ EC: ec, Version: cli.V2, Endpoint: os.Getenv("HASURA_GRAPHQL_TEST_ENDPOINT"), AdminSecret: os.Getenv("HASURA_GRAPHQL_TEST_ADMIN_SECRET"), InitDir: initDir, }, nil}, } for _, tc := range tt { t.Run(tc.name, func(t *testing.T) { err := tc.opts.Run() if err != tc.err { t.Fatalf("%s: expected %v, got %v", tc.name, tc.err, err) } // TODO: (shahidhk) need to verify the contents of the spec generated }) } }
[ "\"HASURA_GRAPHQL_TEST_ENDPOINT\"", "\"HASURA_GRAPHQL_TEST_ADMIN_SECRET\"" ]
[]
[ "HASURA_GRAPHQL_TEST_ENDPOINT", "HASURA_GRAPHQL_TEST_ADMIN_SECRET" ]
[]
["HASURA_GRAPHQL_TEST_ENDPOINT", "HASURA_GRAPHQL_TEST_ADMIN_SECRET"]
go
2
0
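TestInitCmd above is a table-driven test whose InitOptions are filled from HASURA_GRAPHQL_TEST_ENDPOINT and HASURA_GRAPHQL_TEST_ADMIN_SECRET. A stripped-down sketch of the same table shape, where runInit is a hypothetical stand-in for InitOptions.Run; note that the original compares errors with !=, which only behaves for nil or identical sentinel values, so the sketch uses errors.Is instead:

package example

import (
	"errors"
	"os"
	"testing"
)

var errMissingEndpoint = errors.New("endpoint is required")

// runInit is purely illustrative; it stands in for the real Run method.
func runInit(endpoint string) error {
	if endpoint == "" {
		return errMissingEndpoint
	}
	return nil
}

func TestInitCmdSketch(t *testing.T) {
	endpoint := os.Getenv("HASURA_GRAPHQL_TEST_ENDPOINT")
	if endpoint == "" {
		t.Skip("HASURA_GRAPHQL_TEST_ENDPOINT not set")
	}
	tt := []struct {
		name     string
		endpoint string
		err      error
	}{
		{"only-init-dir", endpoint, nil},
		{"missing-endpoint", "", errMissingEndpoint},
	}
	for _, tc := range tt {
		t.Run(tc.name, func(t *testing.T) {
			if err := runInit(tc.endpoint); !errors.Is(err, tc.err) {
				t.Fatalf("expected %v, got %v", tc.err, err)
			}
		})
	}
}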
discordbot.py
from discord.ext import commands import os import traceback #from dispander import dispand bot = commands.Bot(command_prefix='/') token = os.environ['DISCORD_BOT_TOKEN'] @bot.event async def on_command_error(ctx, error): orig_error = getattr(error, "original", error) error_msg = ''.join(traceback.TracebackException.from_exception(orig_error).format()) await ctx.send(error_msg) @bot.command() async def ping(ctx): await ctx.send('pong') """ @client.event async def on_message(message): if message.author.bot: return await dispand(message) """ bot.run(token)
[]
[]
[ "DISCORD_BOT_TOKEN" ]
[]
["DISCORD_BOT_TOKEN"]
python
1
0
pkg/pd/pd_test.go
package pd_test import ( "fmt" "github.com/ManuelReschke/go-pd/pkg/pd" "github.com/joho/godotenv" "github.com/stretchr/testify/assert" "os" "testing" ) const SkipIntegrationTest = "skipping integration test" // TestPD_UploadPOST is a unit test for the POST upload method func TestPD_UploadPOST(t *testing.T) { server := pd.MockFileUploadServer() defer server.Close() testURL := server.URL + "/file" req := &pd.RequestUpload{ PathToFile: "testdata/cat.jpg", FileName: "test_post_cat.jpg", Anonymous: true, URL: testURL, } c := pd.New(nil, nil) rsp, err := c.UploadPOST(req) if err != nil { t.Error(err) } assert.Equal(t, 201, rsp.StatusCode) assert.NotEmpty(t, rsp.ID) assert.Equal(t, "https://pixeldrain.com/u/123456", rsp.GetFileURL()) fmt.Println("POST Req: " + rsp.GetFileURL()) } // TestPD_UploadPOST_Integration run a real integration test against the service func TestPD_UploadPOST_Integration(t *testing.T) { if testing.Short() { t.Skip(SkipIntegrationTest) } req := &pd.RequestUpload{ PathToFile: "testdata/cat.jpg", FileName: "test_post_cat.jpg", } req.Auth = setAuthFromEnv() c := pd.New(nil, nil) rsp, err := c.UploadPOST(req) if err != nil { t.Error(err) } assert.Equal(t, 201, rsp.StatusCode) assert.NotEmpty(t, rsp.ID) fmt.Println("POST Req: " + rsp.GetFileURL()) } // TestPD_UploadPUT is a unit test for the PUT upload method func TestPD_UploadPUT(t *testing.T) { server := pd.MockFileUploadServer() defer server.Close() testURL := server.URL + "/file/" req := &pd.RequestUpload{ PathToFile: "testdata/cat.jpg", FileName: "test_put_cat.jpg", Anonymous: true, URL: testURL + "test_put_cat.jpg", } c := pd.New(nil, nil) rsp, err := c.UploadPUT(req) if err != nil { t.Error(err) } assert.Equal(t, 201, rsp.StatusCode) assert.NotEmpty(t, rsp.ID) assert.Equal(t, "https://pixeldrain.com/u/123456", rsp.GetFileURL()) fmt.Println("PUT Req: " + rsp.GetFileURL()) } // TestPD_UploadPUT_Integration run a real integration test against the service func TestPD_UploadPUT_Integration(t *testing.T) { if testing.Short() { t.Skip(SkipIntegrationTest) } req := &pd.RequestUpload{ PathToFile: "testdata/cat.jpg", FileName: "test_put_cat.jpg", } req.Auth = setAuthFromEnv() c := pd.New(nil, nil) rsp, err := c.UploadPUT(req) if err != nil { t.Error(err) } assert.Equal(t, 201, rsp.StatusCode) assert.NotEmpty(t, rsp.ID) fmt.Println("PUT Req: " + rsp.GetFileURL()) } // TestPD_Download is a unit test for the GET "download" method func TestPD_Download(t *testing.T) { server := pd.MockFileUploadServer() defer server.Close() testURL := server.URL + "/file/K1dA8U5W" req := &pd.RequestDownload{ PathToSave: "testdata/cat_download.jpg", ID: "K1dA8U5W", URL: testURL, } c := pd.New(nil, nil) rsp, err := c.Download(req) if err != nil { t.Error(err) } assert.Equal(t, 200, rsp.StatusCode) assert.Equal(t, true, rsp.Success) } // TestPD_Download_Integration run a real integration test against the service func TestPD_Download_Integration(t *testing.T) { if testing.Short() { t.Skip(SkipIntegrationTest) } req := &pd.RequestDownload{ PathToSave: "testdata/cat_download.jpg", ID: "K1dA8U5W", } req.Auth = setAuthFromEnv() c := pd.New(nil, nil) rsp, err := c.Download(req) if err != nil { t.Error(err) } assert.Equal(t, 200, rsp.StatusCode) assert.Equal(t, "cat_download.jpg", rsp.FileName) assert.Equal(t, int64(37621), rsp.FileSize) } // TestPD_GetFileInfo is a unit test for the GET "file info" method func TestPD_GetFileInfo(t *testing.T) { server := pd.MockFileUploadServer() defer server.Close() testURL := server.URL + "/file/K1dA8U5W/info" req := 
&pd.RequestFileInfo{ ID: "K1dA8U5W", URL: testURL, } c := pd.New(nil, nil) rsp, err := c.GetFileInfo(req) if err != nil { t.Error(err) } assert.Equal(t, 200, rsp.StatusCode) assert.Equal(t, true, rsp.Success) assert.Equal(t, "K1dA8U5W", rsp.ID) assert.Equal(t, 37621, rsp.Size) assert.Equal(t, "1af93d68009bdfd52e1da100a019a30b5fe083d2d1130919225ad0fd3d1fed0b", rsp.HashSha256) } // TestPD_GetFileInfo_Integration run a real integration test against the service func TestPD_GetFileInfo_Integration(t *testing.T) { if testing.Short() { t.Skip(SkipIntegrationTest) } req := &pd.RequestFileInfo{ ID: "K1dA8U5W", } req.Auth = setAuthFromEnv() c := pd.New(nil, nil) rsp, err := c.GetFileInfo(req) if err != nil { t.Error(err) } assert.Equal(t, 200, rsp.StatusCode) assert.Equal(t, true, rsp.Success) assert.Equal(t, "K1dA8U5W", rsp.ID) assert.Equal(t, 37621, rsp.Size) assert.Equal(t, "1af93d68009bdfd52e1da100a019a30b5fe083d2d1130919225ad0fd3d1fed0b", rsp.HashSha256) } // TestPD_DownloadThumbnail is a unit test for the GET "download thumbnail" method func TestPD_DownloadThumbnail(t *testing.T) { server := pd.MockFileUploadServer() defer server.Close() testURL := server.URL + "/file/K1dA8U5W/thumbnail?width=64&height=64" req := &pd.RequestThumbnail{ ID: "K1dA8U5W", Height: "64", Width: "64", PathToSave: "testdata/cat_download_thumbnail.jpg", URL: testURL, } req.Auth = setAuthFromEnv() c := pd.New(nil, nil) rsp, err := c.DownloadThumbnail(req) if err != nil { t.Error(err) } assert.Equal(t, 200, rsp.StatusCode) assert.Equal(t, "cat_download_thumbnail.jpg", rsp.FileName) assert.Equal(t, int64(7056), rsp.FileSize) } // TestPD_DownloadThumbnail_Integration run a real integration test against the service func TestPD_DownloadThumbnail_Integration(t *testing.T) { if testing.Short() { t.Skip(SkipIntegrationTest) } req := &pd.RequestThumbnail{ ID: "K1dA8U5W", Height: "64", Width: "64", PathToSave: "testdata/cat_download_thumbnail.jpg", } req.Auth = setAuthFromEnv() c := pd.New(nil, nil) rsp, err := c.DownloadThumbnail(req) if err != nil { t.Error(err) } assert.Equal(t, 200, rsp.StatusCode) assert.Equal(t, "cat_download_thumbnail.jpg", rsp.FileName) assert.Equal(t, int64(7056), rsp.FileSize) } // TestPD_Delete is a unit test for the DELETE "delete" method func TestPD_Delete(t *testing.T) { server := pd.MockFileUploadServer() defer server.Close() testURL := server.URL + "/file/K1dA8U5W" req := &pd.RequestDelete{ ID: "K1dA8U5W", URL: testURL, } req.Auth = setAuthFromEnv() c := pd.New(nil, nil) rsp, err := c.Delete(req) if err != nil { t.Error(err) } assert.Equal(t, true, rsp.Success) assert.Equal(t, "file_deleted", rsp.Value) assert.Equal(t, "The file has been deleted.", rsp.Message) } // TestPD_Delete_Integration run a real integration test against the service func TestPD_Delete_Integration(t *testing.T) { if testing.Short() { t.Skip(SkipIntegrationTest) } req := &pd.RequestDelete{ ID: "123", // K1dA8U5W } req.Auth = setAuthFromEnv() c := pd.New(nil, nil) rsp, err := c.Delete(req) if err != nil { t.Error(err) } assert.Equal(t, false, rsp.Success) assert.Equal(t, "not_found", rsp.Value) assert.Equal(t, "The entity you requested could not be found", rsp.Message) } // TestPD_CreateList is a unit test for the POST "list" method func TestPD_CreateList(t *testing.T) { server := pd.MockFileUploadServer() defer server.Close() testURL := server.URL + "/list" // files to add files := []pd.ListFile{ {ID: "K1dA8U5W", Description: "Hallo Welt"}, {ID: "bmrc4iyD", Description: "Hallo Welt 2"}, } // create list request req := 
&pd.RequestCreateList{ Title: "Test List", Anonymous: false, Files: files, URL: testURL, } req.Auth = setAuthFromEnv() c := pd.New(nil, nil) rsp, err := c.CreateList(req) if err != nil { t.Error(err) } assert.Equal(t, 200, rsp.StatusCode) assert.Equal(t, true, rsp.Success) assert.NotEmpty(t, rsp.ID) } // TestPD_Delete_Integration run a real integration test against the service func TestPD_CreateList_Integration(t *testing.T) { if testing.Short() { t.Skip(SkipIntegrationTest) } // files to add files := []pd.ListFile{ {ID: "123456", Description: "Hallo Welt"}, {ID: "678900", Description: "Hallo Welt 2"}, } // create list request req := &pd.RequestCreateList{ Title: "Test List", Anonymous: false, Files: files, } req.Auth = setAuthFromEnv() c := pd.New(nil, nil) rsp, err := c.CreateList(req) if err != nil { t.Error(err) } assert.Equal(t, 422, rsp.StatusCode) assert.Equal(t, false, rsp.Success) assert.Equal(t, "list_file_not_found", rsp.Value) assert.Equal(t, "File was not found in the database", rsp.Message) } // TestPD_GetList is a unit test for the GET "list/{id}" method func TestPD_GetList(t *testing.T) { server := pd.MockFileUploadServer() defer server.Close() testURL := server.URL + "/list/123" req := &pd.RequestGetList{ ID: "123", URL: testURL, } req.Auth = setAuthFromEnv() c := pd.New(nil, nil) rsp, err := c.GetList(req) if err != nil { t.Error(err) } assert.Equal(t, 200, rsp.StatusCode) assert.Equal(t, true, rsp.Success) assert.NotEmpty(t, rsp.ID) assert.Equal(t, "Rust in Peace", rsp.Title) assert.Equal(t, 123456, rsp.Files[0].Size) } // TestPD_GetList_Integration run a real integration test against the service func TestPD_GetList_Integration(t *testing.T) { if testing.Short() { t.Skip(SkipIntegrationTest) } req := &pd.RequestGetList{ ID: "Cap4T1LP", } req.Auth = setAuthFromEnv() c := pd.New(nil, nil) rsp, err := c.GetList(req) if err != nil { t.Error(err) } assert.Equal(t, 200, rsp.StatusCode) assert.Equal(t, true, rsp.Success) assert.NotEmpty(t, rsp.ID) assert.Equal(t, "Test List", rsp.Title) assert.Equal(t, 37621, rsp.Files[0].Size) } // TestPD_GetUserFiles is a unit test for the GET "/user/files" method func TestPD_GetUserFiles(t *testing.T) { server := pd.MockFileUploadServer() defer server.Close() testURL := server.URL + "/user/files" req := &pd.RequestGetUserFiles{ URL: testURL, } req.Auth = setAuthFromEnv() c := pd.New(nil, nil) rsp, err := c.GetUserFiles(req) if err != nil { t.Error(err) } assert.Equal(t, 200, rsp.StatusCode) assert.Equal(t, true, rsp.Success) assert.Equal(t, "tUxgDCoQ", rsp.Files[0].ID) assert.Equal(t, "test_post_cat.jpg", rsp.Files[0].Name) } // TestPD_GetUserFiles_Integration run a real integration test against the service func TestPD_GetUserFiles_Integration(t *testing.T) { if testing.Short() { t.Skip(SkipIntegrationTest) } req := &pd.RequestGetUserFiles{} req.Auth = setAuthFromEnv() c := pd.New(nil, nil) rsp, err := c.GetUserFiles(req) if err != nil { t.Error(err) } assert.Equal(t, 200, rsp.StatusCode) assert.Equal(t, true, rsp.Success) assert.Equal(t, "tUxgDCoQ", rsp.Files[0].ID) } // TestPD_GetUserLists is a unit test for the GET "/user/files" method func TestPD_GetUserLists(t *testing.T) { server := pd.MockFileUploadServer() defer server.Close() testURL := server.URL + "/user/lists" req := &pd.RequestGetUserLists{ URL: testURL, } req.Auth = setAuthFromEnv() c := pd.New(nil, nil) rsp, err := c.GetUserLists(req) if err != nil { t.Error(err) } assert.Equal(t, 200, rsp.StatusCode) assert.Equal(t, true, rsp.Success) assert.Equal(t, "Test List", 
rsp.Lists[0].Title) } // TestPD_GetUserLists_Integration run a real integration test against the service func TestPD_GetUserLists_Integration(t *testing.T) { if testing.Short() { t.Skip(SkipIntegrationTest) } req := &pd.RequestGetUserLists{} req.Auth = setAuthFromEnv() c := pd.New(nil, nil) rsp, err := c.GetUserLists(req) if err != nil { t.Error(err) } assert.Equal(t, 200, rsp.StatusCode) assert.Equal(t, true, rsp.Success) assert.Equal(t, "Test List", rsp.Lists[0].Title) } func setAuthFromEnv() pd.Auth { // load api key from .env_test file currentWorkDirectory, _ := os.Getwd() _ = godotenv.Load(currentWorkDirectory + "/.env_test") apiKey := os.Getenv("API_KEY") return pd.Auth{ APIKey: apiKey, } }
[ "\"API_KEY\"" ]
[]
[ "API_KEY" ]
[]
["API_KEY"]
go
1
0
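setAuthFromEnv above loads a .env_test file with godotenv and then reads API_KEY from the process environment to build the pd.Auth value. A hedged sketch of that helper on its own; github.com/joho/godotenv is the module the test already imports, and the auth struct here is a stand-in for pd.Auth:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/joho/godotenv"
)

// auth is an illustrative stand-in for pd.Auth.
type auth struct {
	APIKey string
}

// authFromEnv merges .env_test (when present) into the environment and
// then reads API_KEY, mirroring the helper in the test above.
func authFromEnv() auth {
	wd, _ := os.Getwd()
	_ = godotenv.Load(filepath.Join(wd, ".env_test")) // a missing file is fine
	return auth{APIKey: os.Getenv("API_KEY")}
}

func main() {
	fmt.Println("API key configured:", authFromEnv().APIKey != "")
}

godotenv.Load does not overwrite variables that are already set, so an exported API_KEY still takes precedence over the file.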
main.go
package main import ( "context" "flag" "fmt" stdlog "log" "net/http" "net/url" "os" "os/signal" "runtime" "strings" "syscall" "time" rbacproxytls "github.com/brancz/kube-rbac-proxy/pkg/tls" "github.com/go-chi/chi" "github.com/go-chi/chi/middleware" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/metalmatze/signal/healthcheck" "github.com/metalmatze/signal/internalserver" "github.com/oklog/run" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/version" "go.uber.org/automaxprocs/maxprocs" "github.com/observatorium/observatorium/internal" metricslegacy "github.com/observatorium/observatorium/internal/api/metrics/legacy" metricsv1 "github.com/observatorium/observatorium/internal/api/metrics/v1" "github.com/observatorium/observatorium/internal/server" "github.com/observatorium/observatorium/internal/tls" ) type config struct { logLevel string logFormat string debug debugConfig server serverConfig tls tlsConfig proxy proxyConfig metrics metricsConfig } type debugConfig struct { mutexProfileFraction int blockProfileRate int name string } type serverConfig struct { listen string listenInternal string healthcheckURL string gracePeriod time.Duration } type tlsConfig struct { certFile string keyFile string clientCAFile string minVersion string cipherSuites []string reloadInterval time.Duration } type proxyConfig struct { bufferSizeBytes int bufferCount int flushInterval time.Duration } type metricsConfig struct { readEndpoint *url.URL writeEndpoint *url.URL } func main() { cfg, err := parseFlags(log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))) if err != nil { stdlog.Fatalf("parse flag: %v", err) } logger := internal.NewLogger(cfg.logLevel, cfg.logFormat, cfg.debug.name) defer level.Info(logger).Log("msg", "exiting") reg := prometheus.NewRegistry() reg.MustRegister( version.NewCollector("observatorium"), prometheus.NewGoCollector(), prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}), ) healthchecks := healthcheck.NewMetricsHandler(healthcheck.NewHandler(), reg) debug := os.Getenv("DEBUG") != "" if debug { runtime.SetMutexProfileFraction(cfg.debug.mutexProfileFraction) runtime.SetBlockProfileRate(cfg.debug.blockProfileRate) } // Running in container with limits but with empty/wrong value of GOMAXPROCS env var could lead to throttling by cpu // maxprocs will automate adjustment by using cgroups info about cpu limit if it set as value for runtime.GOMAXPROCS undo, err := maxprocs.Set(maxprocs.Logger(func(template string, args ...interface{}) { level.Debug(logger).Log("msg", fmt.Sprintf(template, args)) })) if err != nil { level.Error(logger).Log("msg", "failed to set GOMAXPROCS:", "err", err) } defer undo() level.Info(logger).Log("msg", "starting observatorium") var g run.Group { // Signal channels must be buffered. 
sig := make(chan os.Signal, 1) g.Add(func() error { signal.Notify(sig, os.Interrupt, syscall.SIGTERM) <-sig level.Info(logger).Log("msg", "caught interrupt") return nil }, func(_ error) { close(sig) }) } tlsConfig, err := tls.NewServerConfig( log.With(logger, "protocol", "HTTP"), cfg.tls.certFile, cfg.tls.keyFile, cfg.tls.clientCAFile, cfg.tls.minVersion, cfg.tls.cipherSuites, ) if err != nil { stdlog.Fatalf("failed to initialize tls config: %v", err) } { if tlsConfig != nil { r, err := rbacproxytls.NewCertReloader( cfg.tls.certFile, cfg.tls.keyFile, cfg.tls.reloadInterval, ) if err != nil { stdlog.Fatalf("failed to initialize certificate reloader: %v", err) } tlsConfig.GetCertificate = r.GetCertificate ctx, cancel := context.WithCancel(context.Background()) g.Add(func() error { return r.Watch(ctx) }, func(error) { cancel() }) } if cfg.server.healthcheckURL != "" { // checks if server is up healthchecks.AddLivenessCheck("http", healthcheck.HTTPCheck( cfg.server.healthcheckURL, http.MethodGet, http.StatusMovedPermanently, time.Second, ), ) // checks if upstream is reachable through server proxy healthchecks.AddReadinessCheck("http-proxy", healthcheck.HTTPGetCheck( cfg.server.healthcheckURL+"/api/metrics/v1/graph", time.Second, ), ) } r := chi.NewRouter() r.Use(middleware.RequestID) r.Use(middleware.RealIP) r.Use(middleware.Recoverer) r.Use(middleware.StripSlashes) r.Use(middleware.Timeout(2 * time.Minute)) // best set per handler r.Use(server.Logger(logger)) ins := server.NewInstrumentationMiddleware(reg) r.Mount("/api/v1", metricslegacy.NewHandler( cfg.metrics.readEndpoint, metricslegacy.Logger(logger), metricslegacy.Registry(reg), metricslegacy.HandlerInstrumenter(ins), )) r.Mount("/api/metrics/v1", http.StripPrefix("/api/metrics/v1", metricsv1.NewHandler( cfg.metrics.readEndpoint, cfg.metrics.writeEndpoint, metricsv1.Logger(logger), metricsv1.Registry(reg), metricsv1.HandlerInstrumenter(ins), ), ), ) r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, "/api/metrics/v1/graph", http.StatusMovedPermanently) }) s := http.Server{ Addr: cfg.server.listen, Handler: r, TLSConfig: tlsConfig, ReadTimeout: 2 * time.Minute, // best set per handler WriteTimeout: 2 * time.Minute, // best set per handler } g.Add(func() error { level.Info(logger).Log("msg", "starting the HTTP server", "address", cfg.server.listen) if tlsConfig != nil { // certFile and keyFile passed in TLSConfig at initialization. return s.ListenAndServeTLS("", "") } return s.ListenAndServe() }, func(err error) { // gracePeriod is duration the server gracefully shuts down. 
const gracePeriod = 2 * time.Minute ctx, cancel := context.WithTimeout(context.Background(), gracePeriod) defer cancel() level.Info(logger).Log("msg", "shutting down the HTTP server") _ = s.Shutdown(ctx) }) } { h := internalserver.NewHandler( internalserver.WithName("Internal - Observatorium API"), internalserver.WithHealthchecks(healthchecks), internalserver.WithPrometheusRegistry(reg), internalserver.WithPProf(), ) s := http.Server{ Addr: cfg.server.listenInternal, Handler: h, } g.Add(func() error { level.Info(logger).Log("msg", "starting internal HTTP server", "address", s.Addr) return s.ListenAndServe() }, func(err error) { _ = s.Shutdown(context.Background()) }) } if err := g.Run(); err != nil { stdlog.Fatal(err) } } func parseFlags(logger log.Logger) (config, error) { var ( rawTLSCipherSuites string rawMetricsReadEndpoint string rawMetricsWriteEndpoint string ) cfg := config{} flag.StringVar(&cfg.debug.name, "debug.name", "observatorium", "The name to add as prefix to log lines.") flag.IntVar(&cfg.debug.mutexProfileFraction, "debug.mutex-profile-fraction", 10, "The parameter which controls the fraction of mutex contention events that are reported in the mutex profile.") flag.IntVar(&cfg.debug.blockProfileRate, "debug.block-profile-rate", 10, "The parameter controls the fraction of goroutine blocking events that are reported in the blocking profile.") flag.StringVar(&cfg.logLevel, "log.level", "info", "The log filtering level. Options: 'error', 'warn', 'info', 'debug'.") flag.StringVar(&cfg.logFormat, "log.format", internal.LogFormatLogfmt, "The log format to use. Options: 'logfmt', 'json'.") flag.StringVar(&cfg.server.listen, "web.listen", ":8080", "The address on which public server runs.") flag.StringVar(&cfg.server.listenInternal, "web.internal.listen", ":8081", "The address on which internal server runs.") flag.StringVar(&cfg.server.healthcheckURL, "web.healthchecks.url", "http://localhost:8080", "The URL on which public server runs and to run healthchecks against.") flag.StringVar(&rawMetricsReadEndpoint, "metrics.read.endpoint", "", "The endpoint against which to send read requests for metrics. It used as a fallback to 'query.endpoint' and 'query-range.endpoint'.") flag.StringVar(&rawMetricsWriteEndpoint, "metrics.write.endpoint", "", "The endpoint against which to make write requests for metrics.") flag.StringVar(&cfg.tls.certFile, "tls-cert-file", "", "File containing the default x509 Certificate for HTTPS. Leave blank to disable TLS.") flag.StringVar(&cfg.tls.keyFile, "tls-private-key-file", "", "File containing the default x509 private key matching --tls-cert-file. Leave blank to disable TLS.") flag.StringVar(&cfg.tls.clientCAFile, "tls-client-ca-file", "", "File containing the TLS CA to verify clients against."+ "If no client CA is specified, there won't be any client verification on server side.") flag.StringVar(&cfg.tls.minVersion, "tls-min-version", "VersionTLS13", "Minimum TLS version supported. 
Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants.") flag.StringVar(&rawTLSCipherSuites, "tls-cipher-suites", "", "Comma-separated list of cipher suites for the server."+ " Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants)."+ "If omitted, the default Go cipher suites will be used."+ "Note that TLS 1.3 ciphersuites are not configurable.") flag.DurationVar(&cfg.tls.reloadInterval, "tls-reload-interval", time.Minute, "The interval at which to watch for TLS certificate changes, by default set to 1 minute.") flag.Parse() metricsReadEndpoint, err := url.ParseRequestURI(rawMetricsReadEndpoint) if err != nil { return cfg, fmt.Errorf("--metrics.read.endpoint is invalid, raw %s: %w", rawMetricsReadEndpoint, err) } cfg.metrics.readEndpoint = metricsReadEndpoint metricsWriteEndpoint, err := url.ParseRequestURI(rawMetricsWriteEndpoint) if err != nil { return cfg, fmt.Errorf("--metrics.write.endpoint is invalid, raw %s: %w", rawMetricsWriteEndpoint, err) } cfg.metrics.writeEndpoint = metricsWriteEndpoint if rawTLSCipherSuites != "" { cfg.tls.cipherSuites = strings.Split(rawTLSCipherSuites, ",") } return cfg, nil }
[ "\"DEBUG\"" ]
[]
[ "DEBUG" ]
[]
["DEBUG"]
go
1
0
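main above enables mutex and block profiling only when DEBUG is set to a non-empty value. A minimal sketch of that toggle with the two runtime calls it guards; the rate of 10 matches the default values of the debug.* flags in the file above:

package main

import (
	"fmt"
	"os"
	"runtime"
)

func main() {
	debug := os.Getenv("DEBUG") != ""
	if debug {
		// Report roughly 1 in 10 mutex contention events and set the
		// block profile rate to 10, matching the flag defaults above.
		runtime.SetMutexProfileFraction(10)
		runtime.SetBlockProfileRate(10)
	}
	fmt.Println("debug profiling enabled:", debug)
}

Gating the calls on an environment variable keeps the profiling overhead out of normal runs while still making it easy to switch on in a deployed container.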
inflateorg/inflateorg.py
""" inflateorg.py Inflate or shrink the membrane to resolve clash between membrane and protein. Handles the primary functions """ import os import shutil from subprocess import call from pkg_resources import resource_filename import MDAnalysis as mda import numpy as np os.environ["GMX_MAXBACKUP"] = "-1" gromacs = '/usr/local/gromacs/2018.8/bin/gmx' mdp = resource_filename(__name__, 'data/minim.mdp') grompp = '{gromacs} grompp -f minim.mdp -c {gro} -p {topol} -o em.tpr -maxwarn {maxwarn} -r {gro}' mdrun = '{gromacs} mdrun -deffnm em' trjconv_pbc = 'echo 0 | {gromacs} trjconv -f em.gro -s em.tpr -o em.gro -pbc mol' trjconv_check = 'echo 0 | {gromacs} trjconv -f pre_inflation.gro -s em.tpr -o em.gro -pbc mol' class InflateORG(): def __init__(self, start_file='start.gro', topol='topol.top', center='protein', mobile='not protein', sep=None, scaling_factor = 0.95, dim = [1,1,0], cutoff=1, maxwarn=0): ''' :param start_file: The coordinate file for the InflateAny program. :param topol: The topology file compatible with the gromacs program :param center: The center of the inflation which is not modified :param mobile: The peripheral which will be expanded and shirked :param sep: Define how to separate the peripheral. :param scaling_factor: The factor of inflation at each :param dim: The dimension of the scaling on x, y and z axis. default is 1,1,1. :param cutoff: Cutoff distance where two particles are considered as separate. ''' self.start_file = start_file self.topol = topol self.center = center self.mobile = mobile self.sep = sep self.scaling_factor = scaling_factor self.dim = np.array(dim) self.cutoff =cutoff self.maxwarn = maxwarn self.sanity_check() self.inflate_system() self.shrink_system() def sanity_check(self): ''' Check if the input is correct. ''' u = mda.Universe(self.start_file) u.select_atoms('({}) or ({})'.format(self.center, self.mobile)).write('pre_inflation.gro') try: call(grompp.format(gromacs=gromacs, gro='pre_inflation.gro', topol=self.topol, maxwarn=self.maxwarn), shell=True) call(trjconv_check.format(gromacs=gromacs), shell=True) shutil.move('em.gro', 'pre_inflation.gro') except: print('Make sure the mdp file (minim.mdp), the topology file ({}) and the input coordinate file ({}) is correct.'.format( self.topol, self.start_file )) os.mkdir('InflateAny') os.remove('em.tpr') shutil.move('pre_inflation.gro', 'InflateAny/pre_inflation.gro') shutil.copy('minim.mdp', 'InflateAny/minim.mdp') os.chdir('InflateAny') self.topol = '../' + self.topol def separate_molecule(self, selection=None, sep=None): ''' Separate the selection into defferent entities. :param u: input universe :return: A list of the atom groups which will be scaled. ''' if self.sep is None: # By default the separtion is based on residue id mobile_parts = u.select_atoms(self.mobile).residues return [residue.atoms for residue in mobile_parts] else: return [u.select_atoms(part) for part in self.sep] def inflate(self, u, scaling_factor): ''' :param u: the input MDAnalysis Universe to which scaling will be applied ''' # dimensions = u.dimensions # center_of_geometry = u.select_atoms(self.center).center_of_geometry() # u.atoms.translate(-center_of_geometry) # # for part in self.separate_mobile(u): # vector = part.center_of_geometry() # part.translate((vector * (scaling_factor - 1))*self.dim) # u.atoms.translate(center_of_geometry * scaling_factor) # dimensions[:3] = dimensions[:3] * scaling_factor # u.dimensions = dimensions # return u def inflate_system(self): ''' Inflate the system. 
:return: ''' u = mda.Universe('pre_inflation.gro') repeat = True count = 0 print('Start inflating the {}'.format(self.mobile)) while repeat: count += 1 u = self.inflate(u, 1 / self.scaling_factor) check = u.select_atoms('{} and around {} ({})'.format(self.mobile, self.cutoff, self.center)) print('Interation {}:'.format(count)) print('Atoms with {}A of {}:'.format(self.cutoff, self.center)) print(check) if len(check) == 0: 'No atom in contact with {}.'.format(self.center) repeat = False print('Begin the shrinking process.') u.atoms.write('inflated.gro') call(grompp.format(gromacs=gromacs, gro='inflated.gro', topol=self.topol, maxwarn=self.maxwarn), shell=True) self.mdrun(gromacs=gromacs) call(trjconv_pbc.format(gromacs=gromacs), shell=True) shutil.copy('em.gro', 'inflated_em.gro') self.count = count def shrink_system(self): for i in range(self.count): print('Interation {}:'.format(i)) u = mda.Universe('em.gro') u = self.inflate(u, self.scaling_factor) u.atoms.write('shrinked_{}.gro'.format(i)) call(grompp.format(gromacs=gromacs, gro='shrinked_{}.gro'.format(i), topol=self.topol, maxwarn=self.maxwarn), shell=True) self.mdrun(gromacs=gromacs) call(trjconv_pbc.format(gromacs=gromacs), shell=True) shutil.copy2('em.gro', 'equilibrated.gro') shutil.copy2('em.gro', '../equilibrated.gro') os.chdir('../') def mdrun(self, gromacs=gromacs, additional=''): # Try to get around the situation where opencl won't start repeat = True while repeat: returncode = call(mdrun.format(gromacs=gromacs) + ' ' + additional, shell=True) if returncode == 0: repeat = False
[]
[]
[ "GMX_MAXBACKUP" ]
[]
["GMX_MAXBACKUP"]
python
1
0
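A minimal usage sketch for the InflateORG class above, assuming the module is importable as inflateorg.inflateorg and that GROMACS plus the start.gro/topol.top inputs exist in the working directory; all file names are illustrative. The whole inflate/minimise/shrink workflow runs from the constructor.

from inflateorg.inflateorg import InflateORG

InflateORG(
    start_file='start.gro',    # protein + membrane coordinates
    topol='topol.top',         # matching GROMACS topology
    center='protein',          # selection that stays fixed
    mobile='not protein',      # selection that is inflated and shrunk back
    scaling_factor=0.95,
    dim=[1, 1, 0],             # scale in x/y only, as in the default
    maxwarn=1,
)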
Data/Juliet-Java/Juliet-Java-v103/000/137/144/CWE36_Absolute_Path_Traversal__Environment_08.java
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE36_Absolute_Path_Traversal__Environment_08.java Label Definition File: CWE36_Absolute_Path_Traversal.label.xml Template File: sources-sink-08.tmpl.java */ /* * @description * CWE: 36 Absolute Path Traversal * BadSource: Environment Read data from an environment variable * GoodSource: A hardcoded string * BadSink: readFile read line from file from disk * Flow Variant: 08 Control flow: if(privateReturnsTrue()) and if(privateReturnsFalse()) * * */ import java.io.*; import java.util.logging.Level; public class CWE36_Absolute_Path_Traversal__Environment_08 extends AbstractTestCase { /* The methods below always return the same value, so a tool * should be able to figure out that every call to these * methods will return true or return false. */ private boolean privateReturnsTrue() { return true; } private boolean privateReturnsFalse() { return false; } /* uses badsource and badsink */ public void bad() throws Throwable { String data; if (privateReturnsTrue()) { /* get environment variable ADD */ /* POTENTIAL FLAW: Read data from an environment variable */ data = System.getenv("ADD"); } else { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run * but ensure data is inititialized before the Sink to avoid compiler errors */ data = null; } /* POTENTIAL FLAW: unvalidated or sandboxed value */ if (data != null) { File file = new File(data); FileInputStream streamFileInputSink = null; InputStreamReader readerInputStreamSink = null; BufferedReader readerBufferdSink = null; if (file.exists() && file.isFile()) { try { streamFileInputSink = new FileInputStream(file); readerInputStreamSink = new InputStreamReader(streamFileInputSink, "UTF-8"); readerBufferdSink = new BufferedReader(readerInputStreamSink); IO.writeLine(readerBufferdSink.readLine()); } catch (IOException exceptIO) { IO.logger.log(Level.WARNING, "Error with stream reading", exceptIO); } finally { /* Close stream reading objects */ try { if (readerBufferdSink != null) { readerBufferdSink.close(); } } catch (IOException exceptIO) { IO.logger.log(Level.WARNING, "Error closing BufferedReader", exceptIO); } try { if (readerInputStreamSink != null) { readerInputStreamSink.close(); } } catch (IOException exceptIO) { IO.logger.log(Level.WARNING, "Error closing InputStreamReader", exceptIO); } try { if (streamFileInputSink != null) { streamFileInputSink.close(); } } catch (IOException exceptIO) { IO.logger.log(Level.WARNING, "Error closing FileInputStream", exceptIO); } } } } } /* goodG2B1() - use goodsource and badsink by changing privateReturnsTrue() to privateReturnsFalse() */ private void goodG2B1() throws Throwable { String data; if (privateReturnsFalse()) { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run * but ensure data is inititialized before the Sink to avoid compiler errors */ data = null; } else { /* FIX: Use a hardcoded string */ data = "foo"; } /* POTENTIAL FLAW: unvalidated or sandboxed value */ if (data != null) { File file = new File(data); FileInputStream streamFileInputSink = null; InputStreamReader readerInputStreamSink = null; BufferedReader readerBufferdSink = null; if (file.exists() && file.isFile()) { try { streamFileInputSink = new FileInputStream(file); readerInputStreamSink = new InputStreamReader(streamFileInputSink, "UTF-8"); readerBufferdSink = new BufferedReader(readerInputStreamSink); IO.writeLine(readerBufferdSink.readLine()); } catch (IOException exceptIO) { IO.logger.log(Level.WARNING, "Error with stream reading", exceptIO); } finally { /* 
Close stream reading objects */ try { if (readerBufferdSink != null) { readerBufferdSink.close(); } } catch (IOException exceptIO) { IO.logger.log(Level.WARNING, "Error closing BufferedReader", exceptIO); } try { if (readerInputStreamSink != null) { readerInputStreamSink.close(); } } catch (IOException exceptIO) { IO.logger.log(Level.WARNING, "Error closing InputStreamReader", exceptIO); } try { if (streamFileInputSink != null) { streamFileInputSink.close(); } } catch (IOException exceptIO) { IO.logger.log(Level.WARNING, "Error closing FileInputStream", exceptIO); } } } } } /* goodG2B2() - use goodsource and badsink by reversing statements in if */ private void goodG2B2() throws Throwable { String data; if (privateReturnsTrue()) { /* FIX: Use a hardcoded string */ data = "foo"; } else { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run * but ensure data is inititialized before the Sink to avoid compiler errors */ data = null; } /* POTENTIAL FLAW: unvalidated or sandboxed value */ if (data != null) { File file = new File(data); FileInputStream streamFileInputSink = null; InputStreamReader readerInputStreamSink = null; BufferedReader readerBufferdSink = null; if (file.exists() && file.isFile()) { try { streamFileInputSink = new FileInputStream(file); readerInputStreamSink = new InputStreamReader(streamFileInputSink, "UTF-8"); readerBufferdSink = new BufferedReader(readerInputStreamSink); IO.writeLine(readerBufferdSink.readLine()); } catch (IOException exceptIO) { IO.logger.log(Level.WARNING, "Error with stream reading", exceptIO); } finally { /* Close stream reading objects */ try { if (readerBufferdSink != null) { readerBufferdSink.close(); } } catch (IOException exceptIO) { IO.logger.log(Level.WARNING, "Error closing BufferedReader", exceptIO); } try { if (readerInputStreamSink != null) { readerInputStreamSink.close(); } } catch (IOException exceptIO) { IO.logger.log(Level.WARNING, "Error closing InputStreamReader", exceptIO); } try { if (streamFileInputSink != null) { streamFileInputSink.close(); } } catch (IOException exceptIO) { IO.logger.log(Level.WARNING, "Error closing FileInputStream", exceptIO); } } } } } public void good() throws Throwable { goodG2B1(); goodG2B2(); } /* Below is the main(). It is only used when building this testcase on * its own for testing or for building a binary to use in testing binary * analysis tools. It is not used when compiling all the testcases as one * application, which is how source code analysis tools are tested. */ public static void main(String[] args) throws ClassNotFoundException, InstantiationException, IllegalAccessException { mainFromParent(args); } }
[ "\"ADD\"" ]
[]
[ "ADD" ]
[]
["ADD"]
java
1
0
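For contrast with the BadSource/BadSink flow in the test case above, here is a rough, language-agnostic sketch (written in Python rather than Java) of pinning an environment-supplied path under an allowed base directory before reading it. ALLOWED_BASE and the helper name are assumptions for illustration, not part of the Juliet test case.

import os

ALLOWED_BASE = os.path.realpath('/var/app/data')   # illustrative base directory

def read_first_line(env_var='ADD'):
    candidate = os.environ.get(env_var)
    if candidate is None:
        return None
    # Resolve relative to the allowed base and reject anything that escapes it,
    # including absolute paths supplied via the environment variable.
    full = os.path.realpath(os.path.join(ALLOWED_BASE, candidate))
    if not full.startswith(ALLOWED_BASE + os.sep):
        raise ValueError('path escapes the allowed base directory')
    with open(full, 'r', encoding='utf-8') as handle:
        return handle.readline()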
jina/__init__.py
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" # do not change this line manually # this is managed by git tag and updated on every release __version__ = '0.6.8' # do not change this line manually # this is managed by proto/build-proto.sh and updated on every execution __proto_version__ = '0.0.63' import platform import sys # do some os-wise patches if sys.version_info < (3, 7, 0): raise OSError('Jina requires Python 3.7 and above, but yours is %s' % sys.version_info) if sys.version_info >= (3, 8, 0) and platform.system() == 'Darwin': # temporary fix for python 3.8 on macos where the default start is set to "spawn" # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods from multiprocessing import set_start_method set_start_method('fork') from datetime import datetime from types import SimpleNamespace import os # fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES' __uptime__ = datetime.now().strftime('%Y%m%d%H%M%S') # update on MacOS # 1. clean this tuple, # 2. grep -ohE "\'JINA_.*?\'" **/*.py | sort -u | sed "s/$/,/g" # 3. copy all lines EXCEPT the first (which is the grep command in the last line) __jina_env__ = ('JINA_ARRAY_QUANT', 'JINA_BINARY_DELIMITER', 'JINA_CONTRIB_MODULE', 'JINA_CONTRIB_MODULE_IS_LOADING', 'JINA_CONTROL_PORT', 'JINA_DB_COLLECTION', 'JINA_DB_HOSTNAME', 'JINA_DB_NAME', 'JINA_DB_PASSWORD', 'JINA_DB_USERNAME', 'JINA_DEFAULT_HOST', 'JINA_DISABLE_UVLOOP', 'JINA_EXECUTOR_WORKDIR', 'JINA_FULL_CLI', 'JINA_IPC_SOCK_TMP', 'JINA_LOG_CONFIG', 'JINA_LOG_NO_COLOR', 'JINA_POD_NAME', 'JINA_PROFILING', 'JINA_RANDOM_PORTS', 'JINA_SOCKET_HWM', 'JINA_TEST_GPU', 'JINA_TEST_PRETRAINED', 'JINA_VCS_VERSION', 'JINA_WARN_UNNAMED') __default_host__ = os.environ.get('JINA_DEFAULT_HOST', '0.0.0.0') __ready_msg__ = 'ready and listening' __stop_msg__ = 'terminated' __unable_to_load_pretrained_model_msg__ = 'Executor depending on pretrained model file could not find the pretrained model' __binary_delimiter__ = os.environ.get('JINA_BINARY_DELIMITER', '460841a0a8a430ae25d9ad7c1f048c57').encode() JINA_GLOBAL = SimpleNamespace() JINA_GLOBAL.imported = SimpleNamespace() JINA_GLOBAL.imported.executors = False JINA_GLOBAL.imported.drivers = False JINA_GLOBAL.imported.hub = False JINA_GLOBAL.logserver = SimpleNamespace() def import_classes(namespace: str, targets=None, show_import_table: bool = False, import_once: bool = False): """ Import all or selected executors into the runtime. This is called when Jina is first imported for registering the YAML constructor beforehand. It can be also used to import third-part or external executors. 
:param namespace: the namespace to import :param targets: the list of executor names to import :param show_import_table: show the import result as a table :param import_once: import everything only once, to avoid repeated import """ import os, re from .logging import default_logger if namespace == 'jina.executors': import_type = 'ExecutorType' if import_once and JINA_GLOBAL.imported.executors: return elif namespace == 'jina.drivers': import_type = 'DriverType' if import_once and JINA_GLOBAL.imported.drivers: return elif namespace == 'jina.hub': import_type = 'ExecutorType' if import_once and JINA_GLOBAL.imported.hub: return else: raise TypeError(f'namespace: {namespace} is unrecognized') from setuptools import find_packages import pkgutil from pkgutil import iter_modules try: path = os.path.dirname(pkgutil.get_loader(namespace).path) except AttributeError: if namespace == 'jina.hub': default_logger.debug(f'hub submodule is not initialized. Please try "git submodule update --init"') return {} modules = set() for info in iter_modules([path]): if (namespace != 'jina.hub' and not info.ispkg) or (namespace == 'jina.hub' and info.ispkg): modules.add('.'.join([namespace, info.name])) for pkg in find_packages(path): modules.add('.'.join([namespace, pkg])) pkgpath = path + '/' + pkg.replace('.', '/') for info in iter_modules([pkgpath]): if (namespace != 'jina.hub' and not info.ispkg) or (namespace == 'jina.hub' and info.ispkg): modules.add('.'.join([namespace, pkg, info.name])) # filter ignored_module_pattern = r'\.tests|\.api|\.bump_version' modules = {m for m in modules if not re.findall(ignored_module_pattern, m)} from collections import defaultdict load_stat = defaultdict(list) bad_imports = [] if isinstance(targets, str): targets = {targets} elif isinstance(targets, list): targets = set(targets) elif targets is None: targets = {} else: raise TypeError(f'target must be a set, but received {targets!r}') depend_tree = {} import importlib from .helper import colored for m in modules: try: mod = importlib.import_module(m) for k in dir(mod): # import the class if (getattr(mod, k).__class__.__name__ == import_type) and (not targets or k in targets): try: _c = getattr(mod, k) load_stat[m].append( (k, True, colored('▸', 'green').join(f'{vvv.__name__}' for vvv in _c.mro()[:-1][::-1]))) d = depend_tree for vvv in _c.mro()[:-1][::-1]: if vvv.__name__ not in d: d[vvv.__name__] = {} d = d[vvv.__name__] d['module'] = m if k in targets: targets.remove(k) if not targets: return # target execs are all found and loaded, return try: # load the default request for this executor if possible from .executors.requests import get_default_reqs get_default_reqs(type.mro(getattr(mod, k))) except ValueError: pass except Exception as ex: load_stat[m].append((k, False, ex)) bad_imports.append('.'.join([m, k])) if k in targets: raise ex # target class is found but not loaded, raise return except Exception as ex: load_stat[m].append(('', False, ex)) bad_imports.append(m) if targets: raise ImportError(f'{targets} can not be found in jina') if show_import_table: from .helper import print_load_table, print_dep_tree_rst print_load_table(load_stat) else: if bad_imports: if namespace != 'jina.hub': default_logger.error( f'theses modules or classes can not be imported {bad_imports}. ' f'You can use `jina check` to list all executors and drivers') else: default_logger.warning( f'due to the missing dependencies or bad implementations, {bad_imports} can not be imported ' f'if you are using these executors/drivers, they wont work. 
' f'You can use `jina check` to list all executors and drivers') if namespace == 'jina.executors': JINA_GLOBAL.imported.executors = True elif namespace == 'jina.drivers': JINA_GLOBAL.imported.drivers = True elif namespace == 'jina.hub': JINA_GLOBAL.imported.hub = True return depend_tree # driver first, as executor may contain driver import_classes('jina.drivers', show_import_table=False, import_once=True) import_classes('jina.executors', show_import_table=False, import_once=True) import_classes('jina.hub', show_import_table=False, import_once=True) # manually install the default signal handler import signal signal.signal(signal.SIGINT, signal.default_int_handler) def set_nofile(nofile_atleast=4096): """ sets nofile soft limit to at least 4096, useful for running matlplotlib/seaborn on parallel executing plot generators vs. Ubuntu default ulimit -n 1024 or OS X El Captian 256 temporary setting extinguishing with Python session. """ try: import resource as res except ImportError: # Windows res = None from .logging import default_logger if res is None: return (None,) * 2 soft, ohard = res.getrlimit(res.RLIMIT_NOFILE) hard = ohard if soft < nofile_atleast: soft = nofile_atleast if hard < soft: hard = soft default_logger.debug(f'setting soft & hard ulimit -n {soft} {hard}') try: res.setrlimit(res.RLIMIT_NOFILE, (soft, hard)) except (ValueError, res.error): try: hard = soft default_logger.warning(f'trouble with max limit, retrying with soft,hard {soft},{hard}') res.setrlimit(res.RLIMIT_NOFILE, (soft, hard)) except Exception: default_logger.warning('failed to set ulimit, giving up') soft, hard = res.getrlimit(res.RLIMIT_NOFILE) default_logger.debug(f'ulimit -n soft,hard: {soft} {hard}') return soft, hard set_nofile()
[]
[]
[ "JINA_BINARY_DELIMITER", "JINA_DEFAULT_HOST", "OBJC_DISABLE_INITIALIZE_FORK_SAFETY" ]
[]
["JINA_BINARY_DELIMITER", "JINA_DEFAULT_HOST", "OBJC_DISABLE_INITIALIZE_FORK_SAFETY"]
python
3
0
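A small usage sketch for the import machinery defined above, assuming jina is installed; the 'BaseEncoder' target is illustrative. Environment overrides such as JINA_DEFAULT_HOST are read at import time, so they have to be set before jina is imported.

import os
os.environ.setdefault('JINA_DEFAULT_HOST', '0.0.0.0')   # must be set before the import below

import jina

# Show everything that was registered during import, then pull in one class on demand.
jina.import_classes('jina.executors', show_import_table=True)
jina.import_classes('jina.executors', targets='BaseEncoder')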
services/router.go
package services

import (
    "fmt"
    "log"
    "net/http"
    "os"

    "github.com/gorilla/mux"
    "github.com/pranjal2209/armaan/services/armaan"
)

func HandlerFunc() {
    router := mux.NewRouter().StrictSlash(true)
    router.HandleFunc("/", armaan.RegisterUser)
    router.HandleFunc("/getcountries/", armaan.GetCountries)
    log.Fatal(http.ListenAndServe(getPort(), router))
}

// getPort reads PORT from the environment and falls back to 5000.
func getPort() string {
    var port = os.Getenv("PORT")
    // Set a default port if there is nothing in the environment
    if port == "" {
        port = "5000"
        fmt.Println("INFO: No PORT environment variable detected, defaulting to " + port)
    }
    return ":" + port
}
[ "\"PORT\"" ]
[]
[ "PORT" ]
[]
["PORT"]
go
1
0
Data/Juliet-Java/Juliet-Java-v103/000/135/288/CWE369_Divide_by_Zero__float_Environment_divide_02.java
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE369_Divide_by_Zero__float_Environment_divide_02.java Label Definition File: CWE369_Divide_by_Zero__float.label.xml Template File: sources-sinks-02.tmpl.java */ /* * @description * CWE: 369 Divide by zero * BadSource: Environment Read data from an environment variable * GoodSource: A hardcoded non-zero number (two) * Sinks: divide * GoodSink: Check for zero before dividing * BadSink : Dividing by a value that may be zero * Flow Variant: 02 Control flow: if(true) and if(false) * * */ import java.util.logging.Level; public class CWE369_Divide_by_Zero__float_Environment_divide_02 extends AbstractTestCase { public void bad() throws Throwable { float data; if (true) { data = -1.0f; /* Initialize data */ /* get environment variable ADD */ /* POTENTIAL FLAW: Read data from an environment variable */ { String stringNumber = System.getenv("ADD"); if (stringNumber != null) { try { data = Float.parseFloat(stringNumber.trim()); } catch (NumberFormatException exceptNumberFormat) { IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat); } } } } else { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run * but ensure data is inititialized before the Sink to avoid compiler errors */ data = 0.0f; } if (true) { /* POTENTIAL FLAW: Possibly divide by zero */ int result = (int)(100.0 / data); IO.writeLine(result); } } /* goodG2B1() - use goodsource and badsink by changing first true to false */ private void goodG2B1() throws Throwable { float data; if (false) { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run * but ensure data is inititialized before the Sink to avoid compiler errors */ data = 0.0f; } else { /* FIX: Use a hardcoded number that won't a divide by zero */ data = 2.0f; } if (true) { /* POTENTIAL FLAW: Possibly divide by zero */ int result = (int)(100.0 / data); IO.writeLine(result); } } /* goodG2B2() - use goodsource and badsink by reversing statements in first if */ private void goodG2B2() throws Throwable { float data; if (true) { /* FIX: Use a hardcoded number that won't a divide by zero */ data = 2.0f; } else { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run * but ensure data is inititialized before the Sink to avoid compiler errors */ data = 0.0f; } if (true) { /* POTENTIAL FLAW: Possibly divide by zero */ int result = (int)(100.0 / data); IO.writeLine(result); } } /* goodB2G1() - use badsource and goodsink by changing second true to false */ private void goodB2G1() throws Throwable { float data; if (true) { data = -1.0f; /* Initialize data */ /* get environment variable ADD */ /* POTENTIAL FLAW: Read data from an environment variable */ { String stringNumber = System.getenv("ADD"); if (stringNumber != null) { try { data = Float.parseFloat(stringNumber.trim()); } catch (NumberFormatException exceptNumberFormat) { IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat); } } } } else { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run * but ensure data is inititialized before the Sink to avoid compiler errors */ data = 0.0f; } if (false) { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ IO.writeLine("Benign, fixed string"); } else { /* FIX: Check for value of or near zero before dividing */ if (Math.abs(data) > 0.000001) { int result = (int)(100.0 / data); IO.writeLine(result); } else { IO.writeLine("This would result in a divide by zero"); } } } /* goodB2G2() - use badsource and 
goodsink by reversing statements in second if */ private void goodB2G2() throws Throwable { float data; if (true) { data = -1.0f; /* Initialize data */ /* get environment variable ADD */ /* POTENTIAL FLAW: Read data from an environment variable */ { String stringNumber = System.getenv("ADD"); if (stringNumber != null) { try { data = Float.parseFloat(stringNumber.trim()); } catch (NumberFormatException exceptNumberFormat) { IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat); } } } } else { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run * but ensure data is inititialized before the Sink to avoid compiler errors */ data = 0.0f; } if (true) { /* FIX: Check for value of or near zero before dividing */ if (Math.abs(data) > 0.000001) { int result = (int)(100.0 / data); IO.writeLine(result); } else { IO.writeLine("This would result in a divide by zero"); } } } public void good() throws Throwable { goodG2B1(); goodG2B2(); goodB2G1(); goodB2G2(); } /* Below is the main(). It is only used when building this testcase on * its own for testing or for building a binary to use in testing binary * analysis tools. It is not used when compiling all the testcases as one * application, which is how source code analysis tools are tested. */ public static void main(String[] args) throws ClassNotFoundException, InstantiationException, IllegalAccessException { mainFromParent(args); } }
[ "\"ADD\"", "\"ADD\"", "\"ADD\"" ]
[]
[ "ADD" ]
[]
["ADD"]
java
1
0
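The same source/sink guard as the goodB2G variants above, condensed into a Python sketch: parse the ADD variable defensively and refuse to divide by a (near-)zero value. The helper name and defaults are illustrative.

import os

def divide_by_env(numerator=100.0, env_var='ADD', default=-1.0):
    raw = os.environ.get(env_var)
    value = default
    if raw is not None:
        try:
            value = float(raw.strip())
        except ValueError:
            pass  # keep the default, as the Java test case does on NumberFormatException
    if abs(value) > 0.000001:
        return int(numerator / value)
    return None  # refuse the division instead of raising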
fhirclient/models/explanationofbenefit_tests.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Generated from FHIR 0.5.0.5149 on 2015-07-06. # 2015, SMART Health IT. import os import io import unittest import json from . import explanationofbenefit from .fhirdate import FHIRDate class ExplanationOfBenefitTests(unittest.TestCase): def instantiate_from(self, filename): datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or '' with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle: js = json.load(handle) self.assertEqual("ExplanationOfBenefit", js["resourceType"]) return explanationofbenefit.ExplanationOfBenefit(js) def testExplanationOfBenefit1(self): inst = self.instantiate_from("explanationofbenefit-example.json") self.assertIsNotNone(inst, "Must have instantiated a ExplanationOfBenefit instance") self.implExplanationOfBenefit1(inst) js = inst.as_json() self.assertEqual("ExplanationOfBenefit", js["resourceType"]) inst2 = explanationofbenefit.ExplanationOfBenefit(js) self.implExplanationOfBenefit1(inst2) def implExplanationOfBenefit1(self, inst): self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date) self.assertEqual(inst.created.as_json(), "2014-08-16") self.assertEqual(inst.disposition, "Claim settled as per contract.") self.assertEqual(inst.id, "R3500") self.assertEqual(inst.identifier[0].system, "http://www.BenefitsInc.com/fhir/eob") self.assertEqual(inst.identifier[0].value, "987654321") self.assertEqual(inst.outcome, "complete") self.assertEqual(inst.text.div, "<div>A human-readable rendering of the ExplanationOfBenefit</div>") self.assertEqual(inst.text.status, "generated")
[]
[]
[ "FHIR_UNITTEST_DATADIR" ]
[]
["FHIR_UNITTEST_DATADIR"]
python
1
0
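A hedged sketch of driving the test class above directly: FHIR_UNITTEST_DATADIR has to point at the directory holding explanationofbenefit-example.json before the tests run. The local path is illustrative.

import os
import unittest

os.environ['FHIR_UNITTEST_DATADIR'] = './fhir-examples'   # illustrative location of the example JSON

from fhirclient.models import explanationofbenefit_tests

unittest.main(module=explanationofbenefit_tests, argv=['ignored'], exit=False)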
import/stadt-muenster-topevents/TopEvents.py
# -*- coding: utf-8 -*- """ Copyright (C) 2019 Christian Römer This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. Contact: https://github.com/thunfischtoast or christian.roemer[ät]posteo.de """ import scrapy import urllib.request import urllib.parse import json import logging import os import sys import datetime import pytz class TopEventsSpider(scrapy.Spider): name = "TopEventsSpider" allowed_domains = ["www.muenster.de"] start_urls = [ "https://www.muenster.de/veranstaltungskalender/scripts/frontend/top-veranstaltungen.php", "https://www.muenster.de/veranstaltungskalender/scripts/frontend/mm2/top-veranstaltungen.php?guestID=101" ] if "ELASTICSEARCH_URL_PREFIX" in os.environ: elasticsearch_url_param = os.environ["ELASTICSEARCH_URL_PREFIX"] req_start_date = None req_end_date = None def parse(self, response): self.mapquest_api_key = getattr(self, "mapquest_key", None) if self.mapquest_api_key is None and "MAPQUEST_KEY" in os.environ: self.mapquest_api_key = os.environ["MAPQUEST_KEY"] if hasattr(self, "elasticsearch_url_param") == False: self.elasticsearch_url_param = None self.elasticsearch_url = getattr( self, "elasticsearch_url_prefix", self.elasticsearch_url_param ) detail_links = response.xpath("//a[text() = 'Details']/@href").extract() for href in detail_links: category = "top" yield response.follow( href, callback=self.extract_event, meta={"category": category} ) def extract_event(self, response): """Callback function for the detail pages. We find the indivudal data points and try to bring the date/time in proper form, then summarize it into a Event-object and return it.""" # extract the interesting data points title = self.getText(response, "titel") subtitle = self.getText(response, "untertitel") raw_datetime = self.getText(response, "datum-uhrzeit") description = self.getText(response, "detailbeschreibung") location = self.getText(response, "location") location_adresse = self.getText(response, "location-adresse") link = ( response.xpath("//div[@class='detail-link']/a/@href") .extract_first() ) if link is not None: link = link.strip(" \t\n\r") else: link = None pos = ( title ) image_url = ( response.xpath("//div[@class='tv-grafik']/img/@src") .extract_first() ) times = self.produce_dates(raw_datetime) start_date = times[0] end_date = times[1] lat = "" lng = "" # if a mapquest api key was provided we use it for geocoding if self.mapquest_api_key is not None: latLng = self.fetchMapquestCoordinates(location_adresse) if latLng is not None: lat = latLng[0] lng = latLng[1] else: self.log("No mapquest_api_key! 
Skip location mapping.") event = Event( title=title, subtitle=subtitle, start_date=start_date, end_date=end_date, location=location, location_addresse=location_adresse, location_lat=lat, location_lng=lng, description=description, link=link, category=response.meta["category"], pos=pos, image_url=image_url ) if ( self.elasticsearch_url is not None and isinstance(lat, float) and isinstance(lng, float) ): print( "Check before ES: " + str(self.elasticsearch_url) + "places/event_" + event["pos"] + " at pos lat:" + str(lat) + "; lng:" + str(lng) ) self.log("Putting into ES") self.put_into_es(event) return event def getText(self, response, clazz): """Find the first div with the class clazz and extract the text, stripping whitespaces and such.""" return ( response.xpath("//div[@class='" + clazz + "']/text()") .extract_first() .strip(" \t\n\r") ) def produce_dates(self, raw_datetime): """ Try to produce a clean start and end date (if it exists).""" # dates are sometimes of format "Donnerstag, 26.7.2018, 21.30 - 23.30 Uhr" # and sometimes "26.7.2018 - 22.12.2019" # if there is only a start time it's just "Donnerstag, 26.7.2018, 21.30 Uhr" # sometimes the time is missing entirely, then it's just "Donnerstag, 26.7.2018," # we'll ignore the leading day of the week self.log("----> datetime " + raw_datetime) raw_datetime = raw_datetime.replace("--","-") datetime_parts = raw_datetime.split(",") # split at commas if len(datetime_parts) > 1: date = datetime_parts[1] else: date = raw_datetime date = date.strip(" \t\n\r") # drop whitespaces and such start_time = "" end_time = "" if len(datetime_parts) > 2: # if there is a time given time = datetime_parts[2].replace("Uhr", "") # drop unnessary string time_splits = time.split("-") # split start and end time start_time = time_splits[0].strip(" \t\n\r") if len(time_splits) > 1: end_time = time_splits[1].strip(" \t\n\r") start_date = "" end_date = "" # sometimes, if the event contains two start/end times, the time looks like # 14.00 u. 16.00 # in that case, use the first one for now. In future it would be better # to retain all times if " u. " in start_time: start_time = start_time.split(" u. ")[0] if " u. " in end_time: end_time = end_time.split(" u. ")[0] # produce proper ISO conform datetime strings if start_time is "": if date.count("-"): date_splits = date.split("-") date=date_splits[0].strip(" \t\n\r") end_date = datetime.datetime.strptime(date_splits[1].strip(" \t\n\r"), "%d.%m.%Y") start_date = datetime.datetime.strptime(date, "%d.%m.%Y") # case: no time else: start_date = datetime.datetime.strptime( date + " " + start_time, "%d.%m.%Y %H.%M" ).isoformat() if end_time is not "": end_date = datetime.datetime.strptime( date + " " + end_time, "%d.%m.%Y %H.%M" ).isoformat() self.log("---> got dates " + str(start_date) + " - " + str(end_date)) return (start_date, end_date) def fetchMapquestCoordinates(self, location_adresse): """Try calling the geocoding api from mapquest. 
It it fails return None Documentation: https://developer.mapquest.com/documentation/open/geocoding-api/address/get/""" self.log("Attempt geocoding: " + location_adresse) contents_json = None try: parsed_location_adresse = urllib.parse.quote(location_adresse) mapquest_url = ( "http://open.mapquestapi.com/geocoding/v1/address?key=" + self.mapquest_api_key + "&location=" + parsed_location_adresse + ",M%C3%BCnster,Germany" ) logging.debug("Attempting to fetch " + mapquest_url) resource = urllib.request.urlopen(mapquest_url) contents = resource.read().decode(resource.headers.get_content_charset()) contents_json = json.loads(contents) except Exception as e: logging.warning("Location geocoding failed with exception: " + str(e)) return None status_code = contents_json["info"]["statuscode"] if status_code != 0: # some kind of error happened logging.warning("Location geocoding failed with code " + status_code) return None latLng = contents_json["results"][0]["locations"][0]["latLng"] lat = latLng["lat"] lng = latLng["lng"] self.log("LOCATION: " + str(lat) + ", " + str(lng)) if lat > 52.3 or lat < 51.8 or lng > 8 or lng < 7.3: self.log("NOT MUENSTER! Setting location to ZERO") return None # not in Muenster return (lat, lng) def put_into_es(self, event): """Push the given event into Elasticsearch""" from elasticsearch import Elasticsearch esurl, index_prefix = os.environ["ELASTICSEARCH_URL_PREFIX"].rsplit( "/", maxsplit=1 ) if hasattr(self, "es") == False: self.es = Elasticsearch(esurl) content = { "id": event["pos"], "title": event["title"], "subtitle": event["subtitle"], "start_date": str(event["start_date"]).replace(" ","T")+"Z", "description": event["description"], "link": event["link"], "category": "top", "location_name": event["location"], "location_address": event["location_addresse"], "source": "www.muenster.de", "geo": { "lat": event["location_lat"], "lon": event["location_lng"], }, "is_top_event": True, "images": [ { "image_url":event["image_url"] } ] } if "end_date" in event and event["end_date"]: content["end_date"] = str(event["end_date"]).replace(" ","T")+"Z" res = self.es.index( index=(index_prefix + "events"), doc_type="_doc", body=content, id="event_" + event["pos"], ) self.log(res) class Event(scrapy.Item): title = scrapy.Field() subtitle = scrapy.Field() start_date = scrapy.Field() end_date = scrapy.Field() location = scrapy.Field() location_addresse = scrapy.Field() location_lat = scrapy.Field() location_lng = scrapy.Field() description = scrapy.Field() link = scrapy.Field() category = scrapy.Field() pos = scrapy.Field() image_url = scrapy.Field()
[]
[]
[ "ELASTICSEARCH_URL_PREFIX", "MAPQUEST_KEY" ]
[]
["ELASTICSEARCH_URL_PREFIX", "MAPQUEST_KEY"]
python
2
0
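A standalone sketch of the date handling described in produce_dates() above, for the "Donnerstag, 26.7.2018, 21.30 - 23.30 Uhr" shape quoted in its docstring; it is a simplification that ignores the multi-date and "u." corner cases the spider handles.

import datetime

def parse_event_datetime(raw):
    parts = [p.strip() for p in raw.split(',')]
    date = parts[1] if len(parts) > 1 else parts[0]
    time_part = parts[2].replace('Uhr', '').strip() if len(parts) > 2 else ''
    if not time_part:
        # date only, e.g. "Donnerstag, 26.7.2018,"
        return datetime.datetime.strptime(date, '%d.%m.%Y'), None
    times = [t.strip() for t in time_part.split('-')]
    start = datetime.datetime.strptime(date + ' ' + times[0], '%d.%m.%Y %H.%M')
    end = datetime.datetime.strptime(date + ' ' + times[1], '%d.%m.%Y %H.%M') if len(times) > 1 else None
    return start, end

print(parse_event_datetime('Donnerstag, 26.7.2018, 21.30 - 23.30 Uhr'))
# (datetime.datetime(2018, 7, 26, 21, 30), datetime.datetime(2018, 7, 26, 23, 30))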
evaluation/early_stage/prune.py
""" Pruning a pre-trained model by GSP. Author: Ruizhe Zhao Date: 12/02/2019 The work-flow of this script: - load a pre-trained model (suffixed by 'm') - compute the mask based on weights - fine-tune the model """ import os import sys import argparse import copy import time import shutil import json import logging logging.getLogger().setLevel(logging.DEBUG) import numpy as np import torch import torch.nn as nn import torch.nn.parallel import torch.backends.cudnn as cudnn import torch.optim as optim import torch.utils.data as data import torchvision.transforms as transforms import torchvision.datasets as datasets from gumi.ops.mask_conv2d import MaskConv2d from gumi.pruning import prune_utils from gumi import model_utils from gumi import models # a module contains all supported models model_names = sorted( name for name in models.__dict__ if name.islower() and not name.startswith("__") and callable(models.__dict__[name]) ) import cifar_utils from utils import * # import utilities provided by pytorch-classification from parser import create_parser # argument parser for evaluation tasks from pruner import Pruner parser = create_parser() args = parser.parse_args() # CUDA os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id use_cuda = torch.cuda.is_available() cudnn.benchmark = True def write_summary(args, file_name="summary.json", **kwargs): """ Write summary to a JSON file. """ summary_file = "{}/{}".format(args.checkpoint, file_name) with open(summary_file, "w") as f: json.dump(kwargs, f) def main(): # initialize the pruner pruner = Pruner(args) # pruner.prune(args.checkpoint) pruner.evaluate() # Run regularization pruner.prune( args.checkpoint, fake_mask=True, perm=args.perm, num_iters=args.num_sort_iters ) pruner.evaluate() pruner.regularize() pruner.apply_mask() pruner.evaluate() logging.debug("Fine-tuning model for {} epochs".format(args.epochs)) best_acc = pruner.fine_tune(args.epochs) logging.debug("Fine-tuned model") pruner.evaluate() write_summary(args, best_acc=best_acc) if __name__ == "__main__": main()
[]
[]
[ "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_VISIBLE_DEVICES"]
python
1
0
tools/make_distrib.py
# Copyright (c) 2011 The Chromium Embedded Framework Authors. All rights # reserved. Use of this source code is governed by a BSD-style license that # can be found in the LICENSE file. from date_util import * from file_util import * from gclient_util import * from optparse import OptionParser import os import re import shlex import subprocess from svn_util import * import sys import zipfile def create_archive(input_dir, zip_file): """ Creates a zip archive of the specified input directory. """ zf = zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED) def addDir(dir): for f in os.listdir(dir): full_path = os.path.join(dir, f) if os.path.isdir(full_path): addDir(full_path) else: zf.write(full_path, os.path.relpath(full_path, \ os.path.join(input_dir, os.pardir))) addDir(input_dir) zf.close() def create_readme(src, output_dir, cef_url, cef_rev, cef_ver, chromium_url, \ chromium_rev, chromium_ver, date): """ Creates the README.TXT file. """ data = read_file(src) data = data.replace('$CEF_URL$', cef_url) data = data.replace('$CEF_REV$', cef_rev) data = data.replace('$CEF_VER$', cef_ver) data = data.replace('$CHROMIUM_URL$', chromium_url) data = data.replace('$CHROMIUM_REV$', chromium_rev) data = data.replace('$CHROMIUM_VER$', chromium_ver) data = data.replace('$DATE$', date) write_file(os.path.join(output_dir, 'README.txt'), data) if not options.quiet: sys.stdout.write('Creating README.TXT file.\n') def eval_file(src): """ Loads and evaluates the contents of the specified file. """ return eval(read_file(src), {'__builtins__': None}, None) def transfer_gypi_files(src_dir, gypi_paths, gypi_path_prefix, dst_dir, quiet): """ Transfer files from one location to another. """ for path in gypi_paths: # skip gyp includes if path[:2] == '<@': continue src = os.path.join(src_dir, path) dst = os.path.join(dst_dir, path.replace(gypi_path_prefix, '')) dst_path = os.path.dirname(dst) make_dir(dst_path, quiet) copy_file(src, dst, quiet) def normalize_headers(file, new_path = ''): """ Normalize headers post-processing. Remove the path component from any project include directives. """ data = read_file(file) data = re.sub(r'''#include \"(?!include\/)[a-zA-Z0-9_\/]+\/+([a-zA-Z0-9_\.]+)\"''', \ "// Include path modified for CEF Binary Distribution.\n#include \""+new_path+"\\1\"", data) write_file(file, data) def transfer_files(cef_dir, script_dir, transfer_cfg, output_dir, quiet): """ Transfer files based on the specified configuration. """ if not path_exists(transfer_cfg): return configs = eval_file(transfer_cfg) for cfg in configs: dst = os.path.join(output_dir, cfg['target']) # perform a copy if source is specified if not cfg['source'] is None: src = os.path.join(cef_dir, cfg['source']) dst_path = os.path.dirname(dst) make_dir(dst_path, quiet) copy_file(src, dst, quiet) # place a readme file in the destination directory readme = os.path.join(dst_path, 'README-TRANSFER.txt') if not path_exists(readme): copy_file(os.path.join(script_dir, 'distrib/README-TRANSFER.txt'), readme) open(readme, 'ab').write(cfg['source']+"\n") # perform any required post-processing if 'post-process' in cfg: post = cfg['post-process'] if post == 'normalize_headers': new_path = '' if cfg.has_key('new_header_path'): new_path = cfg['new_header_path'] normalize_headers(dst, new_path) def generate_msvs_projects(version): """ Generate MSVS projects for the specified version. 
""" sys.stdout.write('Generating '+version+' project files...') os.environ['GYP_MSVS_VERSION'] = version gyper = [ 'python', 'tools/gyp_cef', os.path.relpath(os.path.join(output_dir, 'cefclient.gyp'), cef_dir) ] RunAction(cef_dir, gyper); move_file(os.path.relpath(os.path.join(output_dir, 'cefclient.sln')), \ os.path.relpath(os.path.join(output_dir, 'cefclient'+version+'.sln'))) def fix_msvs_projects(): """ Fix the output directory path in all .vcproj and .vcxproj files. """ files = [] for file in get_files(os.path.join(output_dir, '*.vcproj')): files.append(file) for file in get_files(os.path.join(output_dir, '*.vcxproj')): files.append(file) for file in files: data = read_file(file) data = data.replace('../../..\\build\\', '') write_file(file, data) def run(command_line, working_dir): """ Run a command. """ sys.stdout.write('-------- Running "'+command_line+'" in "'+\ working_dir+'"...'+"\n") args = shlex.split(command_line.replace('\\', '\\\\')) return subprocess.check_call(args, cwd=working_dir, env=os.environ, shell=(sys.platform == 'win32')) # cannot be loaded as a module if __name__ != "__main__": sys.stderr.write('This file cannot be loaded as a module!') sys.exit() # parse command-line options disc = """ This utility builds the CEF Binary Distribution. """ parser = OptionParser(description=disc) parser.add_option('--output-dir', dest='outputdir', metavar='DIR', help='output directory [required]') parser.add_option('--allow-partial', action='store_true', dest='allowpartial', default=False, help='allow creation of partial distributions') parser.add_option('--no-symbols', action='store_true', dest='nosymbols', default=False, help='do not create symbol files') parser.add_option('-q', '--quiet', action='store_true', dest='quiet', default=False, help='do not output detailed status information') (options, args) = parser.parse_args() # the outputdir option is required if options.outputdir is None: parser.print_help(sys.stdout) sys.exit() # script directory script_dir = os.path.dirname(__file__) # CEF root directory cef_dir = os.path.abspath(os.path.join(script_dir, os.pardir)) # src directory src_dir = os.path.abspath(os.path.join(cef_dir, os.pardir)) # retrieve url, revision and date information cef_info = get_svn_info(cef_dir) cef_url = cef_info['url'] cef_rev = cef_info['revision'] chromium_info = get_svn_info(os.path.join(cef_dir, os.pardir)) chromium_url = chromium_info['url'] chromium_rev = chromium_info['revision'] date = get_date() # Read and parse the version file (key=value pairs, one per line) args = {} read_version_file(os.path.join(cef_dir, 'VERSION'), args) read_version_file(os.path.join(cef_dir, '../chrome/VERSION'), args) cef_ver = args['CEF_MAJOR']+'.'+args['BUILD']+'.'+cef_rev chromium_ver = args['MAJOR']+'.'+args['MINOR']+'.'+args['BUILD']+'.'+args['PATCH'] # Test the operating system. 
platform = ''; if sys.platform == 'win32': platform = 'windows' elif sys.platform == 'darwin': platform = 'macosx' elif sys.platform.startswith('linux'): platform = 'linux' # output directory output_dir = os.path.abspath(os.path.join(options.outputdir, \ 'cef_binary_'+cef_ver+'_'+platform)) remove_dir(output_dir, options.quiet) make_dir(output_dir, options.quiet) if not options.nosymbols: # symbol directory symbol_dir = os.path.abspath(os.path.join(options.outputdir, \ 'cef_binary_'+cef_ver+'_'+platform+'_symbols')) remove_dir(symbol_dir, options.quiet) make_dir(symbol_dir, options.quiet) # transfer the LICENSE.txt file copy_file(os.path.join(cef_dir, 'LICENSE.txt'), output_dir, options.quiet) # read the variables list from the autogenerated cef_paths.gypi file cef_paths = eval_file(os.path.join(cef_dir, 'cef_paths.gypi')) cef_paths = cef_paths['variables'] # read the variables list from the manually edited cef_paths2.gypi file cef_paths2 = eval_file(os.path.join(cef_dir, 'cef_paths2.gypi')) cef_paths2 = cef_paths2['variables'] # create the include directory include_dir = os.path.join(output_dir, 'include') make_dir(include_dir, options.quiet) # create the cefclient directory cefclient_dir = os.path.join(output_dir, 'cefclient') make_dir(cefclient_dir, options.quiet) # create the libcef_dll_wrapper directory wrapper_dir = os.path.join(output_dir, 'libcef_dll') make_dir(wrapper_dir, options.quiet) # transfer common include files transfer_gypi_files(cef_dir, cef_paths2['includes_common'], \ 'include/', include_dir, options.quiet) transfer_gypi_files(cef_dir, cef_paths2['includes_capi'], \ 'include/', include_dir, options.quiet) transfer_gypi_files(cef_dir, cef_paths2['includes_wrapper'], \ 'include/', include_dir, options.quiet) transfer_gypi_files(cef_dir, cef_paths['autogen_cpp_includes'], \ 'include/', include_dir, options.quiet) transfer_gypi_files(cef_dir, cef_paths['autogen_capi_includes'], \ 'include/', include_dir, options.quiet) # transfer common cefclient files transfer_gypi_files(cef_dir, cef_paths2['cefclient_sources_common'], \ 'tests/cefclient/', cefclient_dir, options.quiet) # transfer common libcef_dll_wrapper files transfer_gypi_files(cef_dir, cef_paths2['libcef_dll_wrapper_sources_common'], \ 'libcef_dll/', wrapper_dir, options.quiet) transfer_gypi_files(cef_dir, cef_paths['autogen_client_side'], \ 'libcef_dll/', wrapper_dir, options.quiet) # transfer gyp files copy_file(os.path.join(script_dir, 'distrib/cefclient.gyp'), output_dir, options.quiet) paths_gypi = os.path.join(cef_dir, 'cef_paths2.gypi') data = read_file(paths_gypi) data = data.replace('tests/cefclient/', 'cefclient/') write_file(os.path.join(output_dir, 'cef_paths2.gypi'), data) copy_file(os.path.join(cef_dir, 'cef_paths.gypi'), \ os.path.join(output_dir, 'cef_paths.gypi'), options.quiet) # transfer additional files transfer_files(cef_dir, script_dir, os.path.join(script_dir, 'distrib/transfer.cfg'), \ output_dir, options.quiet) if platform == 'windows': # create the README.TXT file create_readme(os.path.join(script_dir, 'distrib/win/README.txt'), output_dir, cef_url, \ cef_rev, cef_ver, chromium_url, chromium_rev, chromium_ver, date) # transfer include files transfer_gypi_files(cef_dir, cef_paths2['includes_win'], \ 'include/', include_dir, options.quiet) # transfer cefclient files transfer_gypi_files(cef_dir, cef_paths2['cefclient_sources_win'], \ 'tests/cefclient/', cefclient_dir, options.quiet) # transfer build/Debug files build_dir = os.path.join(src_dir, 'build/Debug'); if not options.allowpartial or 
path_exists(build_dir): dst_dir = os.path.join(output_dir, 'Debug') make_dir(dst_dir, options.quiet) copy_files(os.path.join(script_dir, 'distrib/win/*.dll'), dst_dir, options.quiet) copy_files(os.path.join(build_dir, '*.dll'), dst_dir, options.quiet) copy_file(os.path.join(build_dir, 'cefclient.exe'), dst_dir, options.quiet) copy_file(os.path.join(build_dir, 'cef.pak'), dst_dir, options.quiet) copy_file(os.path.join(build_dir, 'devtools_resources.pak'), dst_dir, options.quiet) copy_dir(os.path.join(build_dir, 'locales'), os.path.join(dst_dir, 'locales'), \ options.quiet) # transfer lib/Debug files dst_dir = os.path.join(output_dir, 'lib/Debug') make_dir(dst_dir, options.quiet) copy_file(os.path.join(build_dir, 'lib/libcef.lib'), dst_dir, options.quiet) else: sys.stderr.write("No Debug build files.\n") # transfer build/Release files build_dir = os.path.join(src_dir, 'build/Release'); if not options.allowpartial or path_exists(build_dir): dst_dir = os.path.join(output_dir, 'Release') make_dir(dst_dir, options.quiet) copy_files(os.path.join(script_dir, 'distrib/win/*.dll'), dst_dir, options.quiet) copy_files(os.path.join(build_dir, '*.dll'), dst_dir, options.quiet) copy_file(os.path.join(build_dir, 'cefclient.exe'), dst_dir, options.quiet) copy_file(os.path.join(build_dir, 'cef.pak'), dst_dir, options.quiet) copy_file(os.path.join(build_dir, 'devtools_resources.pak'), dst_dir, options.quiet) copy_dir(os.path.join(build_dir, 'locales'), os.path.join(dst_dir, 'locales'), \ options.quiet) # transfer lib/Release files dst_dir = os.path.join(output_dir, 'lib/Release') make_dir(dst_dir, options.quiet) copy_file(os.path.join(build_dir, 'lib/libcef.lib'), dst_dir, options.quiet) if not options.nosymbols: # transfer symbols copy_file(os.path.join(build_dir, 'libcef.pdb'), symbol_dir, options.quiet) else: sys.stderr.write("No Release build files.\n") # generate doc files os.popen('make_cppdocs.bat '+cef_rev) # transfer docs files dst_dir = os.path.join(output_dir, 'docs') src_dir = os.path.join(cef_dir, 'docs') if path_exists(src_dir): copy_dir(src_dir, dst_dir, options.quiet) # transfer additional files, if any transfer_files(cef_dir, script_dir, os.path.join(script_dir, 'distrib/win/transfer.cfg'), \ output_dir, options.quiet) # generate the project files generate_msvs_projects('2005'); generate_msvs_projects('2008'); generate_msvs_projects('2010'); fix_msvs_projects(); elif platform == 'macosx': # create the README.TXT file create_readme(os.path.join(script_dir, 'distrib/mac/README.txt'), output_dir, cef_url, \ cef_rev, cef_ver, chromium_url, chromium_rev, chromium_ver, date) # transfer include files transfer_gypi_files(cef_dir, cef_paths2['includes_mac'], \ 'include/', include_dir, options.quiet) # transfer cefclient files transfer_gypi_files(cef_dir, cef_paths2['cefclient_sources_mac'], \ 'tests/cefclient/', cefclient_dir, options.quiet) transfer_gypi_files(cef_dir, cef_paths2['cefclient_sources_mac_helper'], \ 'tests/cefclient/', cefclient_dir, options.quiet) # transfer cefclient/mac files copy_dir(os.path.join(cef_dir, 'tests/cefclient/mac/'), os.path.join(output_dir, 'cefclient/mac/'), \ options.quiet) # transfer xcodebuild/Debug files build_dir = os.path.join(src_dir, 'xcodebuild/Debug') if not options.allowpartial or path_exists(build_dir): dst_dir = os.path.join(output_dir, 'Debug') make_dir(dst_dir, options.quiet) copy_file(os.path.join(build_dir, 'ffmpegsumo.so'), dst_dir, options.quiet) copy_file(os.path.join(build_dir, 'libcef.dylib'), dst_dir, options.quiet) 
copy_file(os.path.join(build_dir, 'libplugin_carbon_interpose.dylib'), dst_dir, options.quiet) else: build_dir = None # transfer xcodebuild/Release files build_dir = os.path.join(src_dir, 'xcodebuild/Release') if not options.allowpartial or path_exists(build_dir): dst_dir = os.path.join(output_dir, 'Release') make_dir(dst_dir, options.quiet) copy_file(os.path.join(build_dir, 'ffmpegsumo.so'), dst_dir, options.quiet) copy_file(os.path.join(build_dir, 'libcef.dylib'), dst_dir, options.quiet) copy_file(os.path.join(build_dir, 'libplugin_carbon_interpose.dylib'), dst_dir, options.quiet) if not options.nosymbols: # create the real dSYM file from the "fake" dSYM file sys.stdout.write("Creating the real dSYM file...\n") src_path = os.path.join(build_dir, 'libcef.dylib.dSYM/Contents/Resources/DWARF/libcef.dylib') dst_path = os.path.join(symbol_dir, 'libcef.dylib.dSYM') run('dsymutil '+src_path+' -o '+dst_path, cef_dir) else: build_dir = None if not build_dir is None: # transfer resource files dst_dir = os.path.join(output_dir, 'Resources') make_dir(dst_dir, options.quiet) copy_files(os.path.join(build_dir, 'cefclient.app/Contents/Frameworks/Chromium Embedded Framework.framework/Resources/*.*'), \ dst_dir, options.quiet) # transfer additional files, if any transfer_files(cef_dir, script_dir, os.path.join(script_dir, 'distrib/mac/transfer.cfg'), \ output_dir, options.quiet) # Generate Xcode project files sys.stdout.write('Generating Xcode project files...') gyper = [ 'python', 'tools/gyp_cef', os.path.relpath(os.path.join(output_dir, 'cefclient.gyp'), cef_dir) ] RunAction(cef_dir, gyper); # Post-process the Xcode project to fix file paths src_file = os.path.join(output_dir, 'cefclient.xcodeproj/project.pbxproj') data = read_file(src_file) data = data.replace('../../../build/mac/', 'tools/') data = data.replace('../../../build', 'build') data = data.replace('../../../xcodebuild', 'xcodebuild') write_file(src_file, data) elif platform == 'linux': # create the README.TXT file create_readme(os.path.join(script_dir, 'distrib/linux/README.txt'), output_dir, cef_url, \ cef_rev, cef_ver, chromium_url, chromium_rev, chromium_ver, date) # transfer out/Debug files build_dir = os.path.join(src_dir, 'out/Debug'); if not options.allowpartial or path_exists(build_dir): dst_dir = os.path.join(output_dir, 'Debug') make_dir(dst_dir, options.quiet) copy_dir(os.path.join(build_dir, 'lib.target'), os.path.join(dst_dir, 'lib.target'), options.quiet) copy_file(os.path.join(build_dir, 'cefclient'), dst_dir, options.quiet) copy_file(os.path.join(build_dir, 'cef.pak'), dst_dir, options.quiet) copy_file(os.path.join(build_dir, 'devtools_resources.pak'), dst_dir, options.quiet) copy_dir(os.path.join(build_dir, 'locales'), os.path.join(dst_dir, 'locales'), options.quiet) else: sys.stderr.write("No Debug build files.\n") # transfer out/Release files build_dir = os.path.join(src_dir, 'out/Release'); if not options.allowpartial or path_exists(build_dir): dst_dir = os.path.join(output_dir, 'Release') make_dir(dst_dir, options.quiet) copy_dir(os.path.join(build_dir, 'lib.target'), os.path.join(dst_dir, 'lib.target'), options.quiet) copy_file(os.path.join(build_dir, 'cefclient'), dst_dir, options.quiet) copy_file(os.path.join(build_dir, 'cef.pak'), dst_dir, options.quiet) copy_file(os.path.join(build_dir, 'devtools_resources.pak'), dst_dir, options.quiet) copy_dir(os.path.join(build_dir, 'locales'), os.path.join(dst_dir, 'locales'), options.quiet) else: sys.stderr.write("No Release build files.\n") # transfer include files 
transfer_gypi_files(cef_dir, cef_paths2['includes_linux'], \ 'include/', include_dir, options.quiet) # transfer cefclient files transfer_gypi_files(cef_dir, cef_paths2['cefclient_sources_linux'], \ 'tests/cefclient/', cefclient_dir, options.quiet) # transfer additional files, if any transfer_files(cef_dir, script_dir, os.path.join(script_dir, 'distrib/linux/transfer.cfg'), \ output_dir, options.quiet) # Create an archive of the output directory zip_file = os.path.split(output_dir)[1] + '.zip' if not options.quiet: sys.stdout.write('Creating '+zip_file+"...\n") create_archive(output_dir, os.path.join(output_dir, os.pardir, zip_file)) if not options.nosymbols: # Create an archive of the symbol directory zip_file = os.path.split(symbol_dir)[1] + '.zip' if not options.quiet: sys.stdout.write('Creating '+zip_file+"...\n") create_archive(symbol_dir, os.path.join(symbol_dir, os.pardir, zip_file))
[]
[]
[ "GYP_MSVS_VERSION" ]
[]
["GYP_MSVS_VERSION"]
python
1
0
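The script above deliberately exits if it is loaded as a module, so here is a standalone sketch of the same create_archive() idea: zip a distribution directory relative to its parent so the archive unpacks into a single folder. The directory name is illustrative.

import os
import zipfile

def create_archive(input_dir, zip_file):
    with zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED) as zf:
        for root, _, files in os.walk(input_dir):
            for name in files:
                full_path = os.path.join(root, name)
                # store paths relative to the parent, mirroring the addDir() recursion above
                zf.write(full_path, os.path.relpath(full_path, os.path.join(input_dir, os.pardir)))

create_archive('cef_binary_1.0.0_linux', 'cef_binary_1.0.0_linux.zip')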
examples/mmt_train_kmeans.py
from __future__ import print_function, absolute_import import argparse import os.path as osp import random import numpy as np import sys from sklearn.cluster import KMeans from sklearn.preprocessing import normalize import torch from torch import nn from torch.backends import cudnn from torch.utils.data import DataLoader from mmt import datasets from mmt import models from mmt.trainers import MMTTrainer from mmt.evaluators import Evaluator, extract_features from mmt.utils.data import IterLoader from mmt.utils.data import transforms as T from mmt.utils.data.sampler import RandomMultipleGallerySampler from mmt.utils.data.preprocessor import Preprocessor from mmt.utils.logging import Logger from mmt.utils.serialization import load_checkpoint, save_checkpoint, copy_state_dict import os best_mAP = 0 def get_data(name, data_dir): root = osp.join(data_dir, name) dataset = datasets.create(name, root) return dataset def get_train_loader(dataset, height, width, batch_size, workers, num_instances, iters): normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) train_transformer = T.Compose([ T.Resize((height, width), interpolation=3), T.RandomHorizontalFlip(p=0.5), T.Pad(10), T.RandomCrop((height, width)), T.ToTensor(), normalizer, T.RandomErasing(probability=0.5, mean=[0.485, 0.456, 0.406]) ]) train_set = sorted(dataset.train, key=lambda x:x[1]) rmgs_flag = num_instances > 0 if rmgs_flag: sampler = RandomMultipleGallerySampler(train_set, num_instances) else: sampler = None train_loader = IterLoader( DataLoader(Preprocessor(train_set, root=dataset.images_dir, transform=train_transformer, mutual=True), batch_size=batch_size, num_workers=workers, sampler=sampler, shuffle=not rmgs_flag, pin_memory=True, drop_last=True), length=iters) return train_loader def get_test_loader(dataset, height, width, batch_size, workers, testset=None): normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) test_transformer = T.Compose([ T.Resize((height, width), interpolation=3), T.ToTensor(), normalizer ]) if (testset is None): testset = list(set(dataset.query) | set(dataset.gallery)) test_loader = DataLoader( Preprocessor(testset, root=dataset.images_dir, transform=test_transformer), batch_size=batch_size, num_workers=workers, shuffle=False, pin_memory=True) else: test_loader = DataLoader( Preprocessor(testset, root=dataset.images_dir, transform=test_transformer, mutual=False, cluster=True), batch_size=batch_size, num_workers=workers, shuffle=False, pin_memory=True) return test_loader def create_model(args): model_1 = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=args.num_clusters) model_2 = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=args.num_clusters) model_1_ema = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=args.num_clusters) model_2_ema = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=args.num_clusters) model_1.cuda() model_2.cuda() model_1_ema.cuda() model_2_ema.cuda() model_1 = nn.DataParallel(model_1) model_2 = nn.DataParallel(model_2) model_1_ema = nn.DataParallel(model_1_ema) model_2_ema = nn.DataParallel(model_2_ema) initial_weights = load_checkpoint(args.init_1) copy_state_dict(initial_weights['state_dict'], model_1) copy_state_dict(initial_weights['state_dict'], model_1_ema) model_1_ema.module.classifier.weight.data.copy_(model_1.module.classifier.weight.data) initial_weights = 
load_checkpoint(args.init_2) copy_state_dict(initial_weights['state_dict'], model_2) copy_state_dict(initial_weights['state_dict'], model_2_ema) model_2_ema.module.classifier.weight.data.copy_(model_2.module.classifier.weight.data) for param in model_1_ema.parameters(): param.detach_() for param in model_2_ema.parameters(): param.detach_() return model_1, model_2, model_1_ema, model_2_ema def main(): args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = '0,1' # CUDA environment if args.seed is not None: random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) cudnn.deterministic = True main_worker(args) def main_worker(args): global best_mAP cudnn.benchmark = True sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt')) print("==========\nArgs:{}\n==========".format(args)) # Create data loaders iters = args.iters if (args.iters>0) else None dataset_target = get_data(args.dataset_target, args.data_dir) test_loader_target = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers) cluster_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers, testset=dataset_target.train) # Create model model_1, model_2, model_1_ema, model_2_ema = create_model(args) # Evaluator evaluator_1_ema = Evaluator(model_1_ema) evaluator_2_ema = Evaluator(model_2_ema) for epoch in range(args.epochs): dict_f, _ = extract_features(model_1_ema, cluster_loader, print_freq=50) cf_1 = torch.stack(list(dict_f.values())).numpy() dict_f, _ = extract_features(model_2_ema, cluster_loader, print_freq=50) cf_2 = torch.stack(list(dict_f.values())).numpy() cf = (cf_1+cf_2)/2 print('\n Clustering into {} classes \n'.format(args.num_clusters)) km = KMeans(n_clusters=args.num_clusters, random_state=args.seed, n_jobs=2).fit(cf) model_1.module.classifier.weight.data.copy_(torch.from_numpy(normalize(km.cluster_centers_, axis=1)).float().cuda()) model_2.module.classifier.weight.data.copy_(torch.from_numpy(normalize(km.cluster_centers_, axis=1)).float().cuda()) model_1_ema.module.classifier.weight.data.copy_(torch.from_numpy(normalize(km.cluster_centers_, axis=1)).float().cuda()) model_2_ema.module.classifier.weight.data.copy_(torch.from_numpy(normalize(km.cluster_centers_, axis=1)).float().cuda()) target_label = km.labels_ # change pseudo labels for i in range(len(dataset_target.train)): dataset_target.train[i] = list(dataset_target.train[i]) dataset_target.train[i][2] = int(target_label[i]) ## change dataset_target.train[i][1] --> [2] dataset_target.train[i] = tuple(dataset_target.train[i]) # the place to re-compute cluster centers (e.g.500) with re-assigned pseudo labels # based on the memory slot which contains the features of each target training images train_loader_target = get_train_loader(dataset_target, args.height, args.width, args.batch_size, args.workers, args.num_instances, iters) # Optimizer params = [] for key, value in model_1.named_parameters(): if not value.requires_grad: continue params += [{"params": [value], "lr": args.lr, "weight_decay": args.weight_decay}] for key, value in model_2.named_parameters(): if not value.requires_grad: continue params += [{"params": [value], "lr": args.lr, "weight_decay": args.weight_decay}] optimizer = torch.optim.Adam(params) # Trainer ## trainer = MMTTrainer(model_1, model_2, model_1_ema, model_2_ema, num_cluster=args.num_clusters, alpha=args.alpha, cf=cf, f_memory_label=target_label) ## train_loader_target.new_epoch() trainer.train(epoch, train_loader_target, optimizer, 
ce_soft_weight=args.soft_ce_weight, tri_soft_weight=args.soft_tri_weight, print_freq=args.print_freq, train_iters=len(train_loader_target)) def save_model(model_ema, is_best, best_mAP, mid): save_checkpoint({ 'state_dict': model_ema.state_dict(), 'epoch': epoch + 1, 'best_mAP': best_mAP, }, is_best, fpath=osp.join(args.logs_dir, 'model'+str(mid)+'_checkpoint.pth.tar')) if ((epoch+1)%args.eval_step==0 or (epoch==args.epochs-1)): mAP_1 = evaluator_1_ema.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=False) mAP_2 = evaluator_2_ema.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=False) is_best = (mAP_1>best_mAP) or (mAP_2>best_mAP) best_mAP = max(mAP_1, mAP_2, best_mAP) save_model(model_1_ema, (is_best and (mAP_1>mAP_2)), best_mAP, 1) save_model(model_2_ema, (is_best and (mAP_1<=mAP_2)), best_mAP, 2) print('\n * Finished epoch {:3d} model no.1 mAP: {:5.1%} model no.2 mAP: {:5.1%} best: {:5.1%}{}\n'. format(epoch, mAP_1, mAP_2, best_mAP, ' *' if is_best else '')) print ('Test on the best model.') checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar')) model_1_ema.load_state_dict(checkpoint['state_dict']) evaluator_1_ema.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True) if __name__ == '__main__': parser = argparse.ArgumentParser(description="MMT Training") # data parser.add_argument('-dt', '--dataset-target', type=str, default='market1501', choices=datasets.names()) parser.add_argument('-b', '--batch-size', type=int, default=64) parser.add_argument('-j', '--workers', type=int, default=4) parser.add_argument('--num-clusters', type=int, default=500) parser.add_argument('--height', type=int, default=256, help="input height") parser.add_argument('--width', type=int, default=128, help="input width") parser.add_argument('--num-instances', type=int, default=4, help="each minibatch consist of " "(batch_size // num_instances) identities, and " "each identity has num_instances instances, " "default: 0 (NOT USE)") # model parser.add_argument('-a', '--arch', type=str, default='resnet50', choices=models.names()) parser.add_argument('--features', type=int, default=0) parser.add_argument('--dropout', type=float, default=0) # optimizer parser.add_argument('--lr', type=float, default=0.00035, help="learning rate of new parameters, for pretrained " "parameters it is 10 times smaller than this") parser.add_argument('--momentum', type=float, default=0.9) parser.add_argument('--alpha', type=float, default=0.999) parser.add_argument('--moving-avg-momentum', type=float, default=0) parser.add_argument('--weight-decay', type=float, default=5e-4) parser.add_argument('--soft-ce-weight', type=float, default=0.5) parser.add_argument('--soft-tri-weight', type=float, default=0.8) parser.add_argument('--epochs', type=int, default=40) parser.add_argument('--iters', type=int, default=800) # training configs parser.add_argument('--init-1', type=str, default='', metavar='PATH') parser.add_argument('--init-2', type=str, default='', metavar='PATH') parser.add_argument('--seed', type=int, default=1) parser.add_argument('--print-freq', type=int, default=1) parser.add_argument('--eval-step', type=int, default=1) # path working_dir = osp.dirname(osp.abspath(__file__)) parser.add_argument('--data-dir', type=str, metavar='PATH', default=osp.join(working_dir, 'data')) parser.add_argument('--logs-dir', type=str, metavar='PATH', default=osp.join(working_dir, 'logs')) #parser.add_argument('--memory_size', 
type=int, default=12936) main()
[]
[]
[ "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_VISIBLE_DEVICES"]
python
1
0
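Illustrative note, not part of the dataset row above: the record's constarg annotation comes from the script's os.environ['CUDA_VISIBLE_DEVICES'] = '0,1' assignment in main(). A minimal Python sketch of that constant-key write, using only the standard library:

import os

# Constant-key, constant-value write -- the access pattern this row's constarg field records.
# CUDA-aware libraries read the variable when they first initialise, so the write has to
# happen before any device enumeration (the script above does it at the top of main()).
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"

print(os.environ.get("CUDA_VISIBLE_DEVICES"))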
tests/check_framework/urls/path_compatibility/ending_with_dollar.py
from django.urls import path urlpatterns = [ path("ending-with-dollar$", lambda x: x), ]
[]
[]
[]
[]
[]
python
null
null
null
builder/civo/config.go
//go:generate struct-markdown //go:generate mapstructure-to-hcl2 -type Config package civo import ( "errors" "fmt" "os" "time" "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/common/uuid" "github.com/hashicorp/packer/helper/communicator" "github.com/hashicorp/packer/helper/config" "github.com/hashicorp/packer/packer" "github.com/hashicorp/packer/template/interpolate" "github.com/mitchellh/mapstructure" ) // Config to teh packer type Config struct { common.PackerConfig `mapstructure:",squash"` Comm communicator.Config `mapstructure:",squash"` // The client TOKEN to use to access your account. It // can also be specified via environment variable CIVO_TOKEN, if // set. APIToken string `mapstructure:"api_token" required:"true"` // The name (or slug) of the region to launch the instance // in. Consequently, this is the region where the snapshot will be available. Region string `mapstructure:"region" required:"true"` // The name (or slug) of the instance size to use. See Size string `mapstructure:"size" required:"true"` // The name (or slug) of the base image to use. This is the // image that will be used to launch a new instance and provision it. Template string `mapstructure:"template" required:"true"` // Set to true to enable private networking // for the instance being created. This defaults to true. PublicNetworking string `mapstructure:"private_networking" required:"false"` // The name of the resulting snapshot that will // appear in your account. Defaults to `packer-{{timestamp}}` (see // configuration templates for more info). SnapshotName string `mapstructure:"snapshot_name" required:"false"` // The regions of the resulting // snapshot that will appear in your account. SnapshotRegions []string `mapstructure:"snapshot_regions" required:"false"` // The time to wait, as a duration string, for a // instance to enter a desired state (such as "active") before timing out. The // default state timeout is "6m". StateTimeout time.Duration `mapstructure:"state_timeout" required:"false"` // How long to wait for an image to be published to the shared image // gallery before timing out. If your Packer build is failing on the // Publishing to Shared Image Gallery step with the error `Original Error: // context deadline exceeded`, but the image is present when you check your // Azure dashboard, then you probably need to increase this timeout from // its default of "60m" (valid time units include `s` for seconds, `m` for // minutes, and `h` for hours.) SnapshotTimeout time.Duration `mapstructure:"snapshot_timeout" required:"false"` // The name assigned to the instance. Civo sets the hostname of the machine to this value. InstanceName string `mapstructure:"instance_name" required:"false"` ctx interpolate.Context } // Prepare function to prepare the builder func (c *Config) Prepare(raws ...interface{}) ([]string, error) { var md mapstructure.Metadata err := config.Decode(c, &config.DecodeOpts{ Metadata: &md, Interpolate: true, InterpolateContext: &c.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "run_command", }, }, }, raws...) 
if err != nil { return nil, err } // Defaults if c.APIToken == "" { // Default to environment variable for api_token, if it exists c.APIToken = os.Getenv("CIVO_TOKEN") } if c.SnapshotName == "" { def, err := interpolate.Render("civo-packer-{{timestamp}}", nil) if err != nil { panic(err) } // Default to civo-packer-{{ unix timestamp (utc) }} c.SnapshotName = def } if c.InstanceName == "" { // Default to packer-[time-ordered-uuid] c.InstanceName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID()) } if c.StateTimeout == 0 { // Default to 6 minute timeouts waiting for // desired state. i.e waiting for instance to become active c.StateTimeout = 6 * time.Minute } if c.SnapshotTimeout == 0 { // Default to 60 minutes timeout, waiting for snapshot action to finish c.SnapshotTimeout = 60 * time.Minute } if c.PublicNetworking == "" { c.PublicNetworking = "true" } var errs *packer.MultiError if es := c.Comm.Prepare(&c.ctx); len(es) > 0 { errs = packer.MultiErrorAppend(errs, es...) } if c.APIToken == "" { // Required configurations that will display errors if not set errs = packer.MultiErrorAppend( errs, errors.New("api_token for auth must be specified")) } if c.Region == "" { errs = packer.MultiErrorAppend( errs, errors.New("region is required")) } if c.Size == "" { errs = packer.MultiErrorAppend( errs, errors.New("size is required")) } if c.Template == "" { errs = packer.MultiErrorAppend( errs, errors.New("template is required")) } if errs != nil && len(errs.Errors) > 0 { return nil, errs } packer.LogSecretFilter.Set(c.APIToken) return nil, nil }
[ "\"CIVO_TOKEN\"" ]
[]
[ "CIVO_TOKEN" ]
[]
["CIVO_TOKEN"]
go
1
0
testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/PTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.hive.ptest.execution; import java.io.File; import java.net.URL; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.Options; import org.apache.hive.ptest.execution.conf.ExecutionContextConfiguration; import org.apache.hive.ptest.execution.conf.Host; import org.apache.hive.ptest.execution.conf.TestConfiguration; import org.apache.hive.ptest.execution.conf.TestParser; import org.apache.hive.ptest.execution.context.ExecutionContext; import org.apache.hive.ptest.execution.context.ExecutionContextProvider; import org.apache.hive.ptest.execution.ssh.NonZeroExitCodeException; import org.apache.hive.ptest.execution.ssh.RSyncCommandExecutor; import org.apache.hive.ptest.execution.ssh.SSHCommandExecutor; import org.apache.velocity.app.Velocity; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.base.Joiner; import com.google.common.base.Strings; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.io.Files; import com.google.common.io.Resources; import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; public class PTest { static { Velocity.init(); } private static final Logger LOG = LoggerFactory .getLogger(PTest.class); private final TestConfiguration mConfiguration; private final ListeningExecutorService mExecutor; private final Set<String> mExecutedTests; private final Set<String> mFailedTests; private final List<Phase> mPhases; private final ExecutionContext mExecutionContext; private final Logger mLogger; private final List<HostExecutor> mHostExecutors; private final String mBuildTag; private final SSHCommandExecutor mSshCommandExecutor; private final RSyncCommandExecutor mRsyncCommandExecutor; public PTest(final TestConfiguration configuration, final ExecutionContext executionContext, final String buildTag, final File logDir, final LocalCommandFactory localCommandFactory, final SSHCommandExecutor sshCommandExecutor, final RSyncCommandExecutor rsyncCommandExecutor, final Logger logger) throws Exception { mConfiguration = configuration; mLogger = logger; mBuildTag = buildTag; mExecutedTests = Collections.newSetFromMap(new 
ConcurrentHashMap<String, Boolean>()); mFailedTests = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>()); mExecutionContext = executionContext; mSshCommandExecutor = sshCommandExecutor; mRsyncCommandExecutor = rsyncCommandExecutor; mExecutor = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool()); final File failedLogDir = Dirs.create(new File(logDir, "failed")); final File succeededLogDir = Dirs.create(new File(logDir, "succeeded")); final File scratchDir = Dirs.createEmpty(new File(mExecutionContext.getLocalWorkingDirectory(), "scratch")); File patchDir = Dirs.createEmpty(new File(logDir, "patches")); File patchFile = null; if(!configuration.getPatch().isEmpty()) { patchFile = new File(patchDir, buildTag + ".patch"); Files.write(Resources.toByteArray(new URL(configuration.getPatch())), patchFile); } ImmutableMap.Builder<String, String> templateDefaultsBuilder = ImmutableMap.builder(); templateDefaultsBuilder. put("repository", configuration.getRepository()). put("repositoryName", configuration.getRepositoryName()). put("repositoryType", configuration.getRepositoryType()). put("buildTool", configuration.getBuildTool()). put("branch", configuration.getBranch()). put("clearLibraryCache", String.valueOf(configuration.isClearLibraryCache())). put("workingDir", mExecutionContext.getLocalWorkingDirectory()). put("buildTag", buildTag). put("logDir", logDir.getAbsolutePath()). put("javaHome", configuration.getJavaHome()). put("javaHomeForTests", configuration.getJavaHomeForTests()). put("antEnvOpts", configuration.getAntEnvOpts()). put("antArgs", configuration.getAntArgs()). put("antTestArgs", configuration.getAntTestArgs()). put("antTestTarget", configuration.getAntTestTarget()). put("mavenEnvOpts", configuration.getMavenEnvOpts()). put("mavenArgs", configuration.getMavenArgs()). put("mavenBuildArgs", configuration.getMavenBuildArgs()). 
put("mavenTestArgs", configuration.getMavenTestArgs()); final ImmutableMap<String, String> templateDefaults = templateDefaultsBuilder.build(); TestParser testParser = new TestParser(configuration.getContext(), configuration.getTestCasePropertyName(), new File(mExecutionContext.getLocalWorkingDirectory(), configuration.getRepositoryName() + "-source"), logger); HostExecutorBuilder hostExecutorBuilder = new HostExecutorBuilder() { @Override public HostExecutor build(Host host) { return new HostExecutor(host, executionContext.getPrivateKey(), mExecutor, sshCommandExecutor, rsyncCommandExecutor, templateDefaults, scratchDir, succeededLogDir, failedLogDir, 10, logger); } }; List<HostExecutor> hostExecutors = new ArrayList<HostExecutor>(); for(Host host : mExecutionContext.getHosts()) { hostExecutors.add(hostExecutorBuilder.build(host)); } mHostExecutors = new CopyOnWriteArrayList<HostExecutor>(hostExecutors); mPhases = Lists.newArrayList(); mPhases.add(new PrepPhase(mHostExecutors, localCommandFactory, templateDefaults, scratchDir, patchFile, logger)); mPhases.add(new ExecutionPhase(mHostExecutors, mExecutionContext, hostExecutorBuilder, localCommandFactory, templateDefaults, succeededLogDir, failedLogDir, testParser.parse(), mExecutedTests, mFailedTests, logger)); mPhases.add(new ReportingPhase(mHostExecutors, localCommandFactory, templateDefaults, logger)); } public int run() { int result = 0; boolean error = false; List<String> messages = Lists.newArrayList(); Map<String, Long> elapsedTimes = Maps.newTreeMap(); try { mLogger.info("Running tests with " + mConfiguration); for(Phase phase : mPhases) { String msg = "Executing " + phase.getClass().getName(); mLogger.info(msg); messages.add(msg); long start = System.currentTimeMillis(); try { phase.execute(); } finally { long elapsedTime = TimeUnit.MINUTES.convert((System.currentTimeMillis() - start), TimeUnit.MILLISECONDS); elapsedTimes.put(phase.getClass().getSimpleName(), elapsedTime); } } if(!mFailedTests.isEmpty()) { throw new TestsFailedException(mFailedTests.size() + " tests failed"); } } catch(Throwable throwable) { mLogger.error("Test run exited with an unexpected error", throwable); // NonZeroExitCodeExceptions can have long messages and should be // trimmable when published to the JIRA via the JiraService if(throwable instanceof NonZeroExitCodeException) { messages.add("Tests exited with: " + throwable.getClass().getSimpleName()); for(String line : Strings.nullToEmpty(throwable.getMessage()).split("\n")) { messages.add(line); } } else { messages.add("Tests exited with: " + throwable.getClass().getSimpleName() + ": " + throwable.getMessage()); } error = true; } finally { for(HostExecutor hostExecutor : mHostExecutors) { hostExecutor.shutdownNow(); if(hostExecutor.isBad()) { mExecutionContext.addBadHost(hostExecutor.getHost()); } } mSshCommandExecutor.shutdownNow(); mRsyncCommandExecutor.shutdownNow(); mExecutor.shutdownNow(); SortedSet<String> failedTests = new TreeSet<String>(mFailedTests); if(failedTests.isEmpty()) { mLogger.info(String.format("%d failed tests", failedTests.size())); } else { mLogger.warn(String.format("%d failed tests", failedTests.size())); } for(String failingTestName : failedTests) { mLogger.warn(failingTestName); } mLogger.info("Executed " + mExecutedTests.size() + " tests"); for(Map.Entry<String, Long> entry : elapsedTimes.entrySet()) { mLogger.info(String.format("PERF: Phase %s took %d minutes", entry.getKey(), entry.getValue())); } publishJiraComment(error, messages, failedTests); if(error || 
!mFailedTests.isEmpty()) { result = 1; } } return result; } private void publishJiraComment(boolean error, List<String> messages, SortedSet<String> failedTests) { if(mConfiguration.getJiraName().isEmpty()) { mLogger.info("Skipping JIRA comment as name is empty."); return; } if(mConfiguration.getJiraUrl().isEmpty()) { mLogger.info("Skipping JIRA comment as URL is empty."); return; } if(mConfiguration.getJiraUser().isEmpty()) { mLogger.info("Skipping JIRA comment as user is empty."); return; } if(mConfiguration.getJiraPassword().isEmpty()) { mLogger.info("Skipping JIRA comment as password is empty."); return; } JIRAService jira = new JIRAService(mLogger, mConfiguration, mBuildTag); jira.postComment(error, mExecutedTests.size(), failedTests, messages); } public static class Builder { public PTest build(TestConfiguration configuration, ExecutionContext executionContext, String buildTag, File logDir, LocalCommandFactory localCommandFactory, SSHCommandExecutor sshCommandExecutor, RSyncCommandExecutor rsyncCommandExecutor, Logger logger) throws Exception { return new PTest(configuration, executionContext, buildTag, logDir, localCommandFactory, sshCommandExecutor, rsyncCommandExecutor, logger); } } private static final String PROPERTIES = "properties"; private static final String REPOSITORY = TestConfiguration.REPOSITORY; private static final String REPOSITORY_NAME = TestConfiguration.REPOSITORY_NAME; private static final String BRANCH = TestConfiguration.BRANCH; private static final String PATCH = "patch"; private static final String JAVA_HOME = TestConfiguration.JAVA_HOME; private static final String JAVA_HOME_TEST = TestConfiguration.JAVA_HOME_TEST; private static final String ANT_TEST_ARGS = TestConfiguration.ANT_TEST_ARGS; private static final String ANT_ENV_OPTS = TestConfiguration.ANT_ENV_OPTS; private static final String ANT_TEST_TARGET = TestConfiguration.ANT_TEST_TARGET; /** * All args override properties file settings except * for this one which is additive. */ private static final String ANT_ARG = "D"; public static void main(String[] args) throws Exception { LOG.info("Args " + Arrays.toString(args)); CommandLineParser parser = new GnuParser(); Options options = new Options(); options.addOption(null, PROPERTIES, true, "properties file"); options.addOption(null, REPOSITORY, true, "Overrides git repository in properties file"); options.addOption(null, REPOSITORY_NAME, true, "Overrides git repository *name* in properties file"); options.addOption(null, BRANCH, true, "Overrides git branch in properties file"); options.addOption(null, PATCH, true, "URI to patch, either file:/// or http(s)://"); options.addOption(ANT_ARG, null, true, "Supplemntal ant arguments"); options.addOption(null, JAVA_HOME, true, "Java Home for compiling and running tests (unless " + JAVA_HOME_TEST + " is specified)"); options.addOption(null, JAVA_HOME_TEST, true, "Java Home for running tests (optional)"); options.addOption(null, ANT_TEST_ARGS, true, "Arguments to ant test on slave nodes only"); options.addOption(null, ANT_ENV_OPTS, true, "ANT_OPTS environment variable setting"); CommandLine commandLine = parser.parse(options, args); if(!commandLine.hasOption(PROPERTIES)) { throw new IllegalArgumentException(Joiner.on(" "). join(PTest.class.getName(), "--" + PROPERTIES,"config.properties")); } String testConfigurationFile = commandLine.getOptionValue(PROPERTIES); ExecutionContextConfiguration executionContextConfiguration = ExecutionContextConfiguration. 
fromFile(testConfigurationFile); String buildTag = System.getenv("BUILD_TAG") == null ? "undefined-" + System.currentTimeMillis() : System.getenv("BUILD_TAG"); File logDir = Dirs.create(new File(executionContextConfiguration.getGlobalLogDirectory(), buildTag)); LogDirectoryCleaner cleaner = new LogDirectoryCleaner(new File(executionContextConfiguration. getGlobalLogDirectory()), 5); cleaner.setName("LogCleaner-" + executionContextConfiguration.getGlobalLogDirectory()); cleaner.setDaemon(true); cleaner.start(); TestConfiguration conf = TestConfiguration.fromFile(testConfigurationFile, LOG); String repository = Strings.nullToEmpty(commandLine.getOptionValue(REPOSITORY)).trim(); if(!repository.isEmpty()) { conf.setRepository(repository); } String repositoryName = Strings.nullToEmpty(commandLine.getOptionValue(REPOSITORY_NAME)).trim(); if(!repositoryName.isEmpty()) { conf.setRepositoryName(repositoryName); } String branch = Strings.nullToEmpty(commandLine.getOptionValue(BRANCH)).trim(); if(!branch.isEmpty()) { conf.setBranch(branch); } String patch = Strings.nullToEmpty(commandLine.getOptionValue(PATCH)).trim(); if(!patch.isEmpty()) { conf.setPatch(patch); } String javaHome = Strings.nullToEmpty(commandLine.getOptionValue(JAVA_HOME)).trim(); if(!javaHome.isEmpty()) { conf.setJavaHome(javaHome); } String javaHomeForTests = Strings.nullToEmpty(commandLine.getOptionValue(JAVA_HOME_TEST)).trim(); if(!javaHomeForTests.isEmpty()) { conf.setJavaHomeForTests(javaHomeForTests); } String antTestArgs = Strings.nullToEmpty(commandLine.getOptionValue(ANT_TEST_ARGS)).trim(); if(!antTestArgs.isEmpty()) { conf.setAntTestArgs(antTestArgs); } String antEnvOpts = Strings.nullToEmpty(commandLine.getOptionValue(ANT_ENV_OPTS)).trim(); if(!antEnvOpts.isEmpty()) { conf.setAntEnvOpts(antEnvOpts); } String antTestTarget = Strings.nullToEmpty(commandLine.getOptionValue(ANT_TEST_TARGET)).trim(); if(!antTestTarget.isEmpty()) { conf.setAntTestTarget(antTestTarget); } String[] supplementalAntArgs = commandLine.getOptionValues(ANT_ARG); if(supplementalAntArgs != null && supplementalAntArgs.length > 0) { String antArgs = Strings.nullToEmpty(conf.getAntArgs()); if(!(antArgs.isEmpty() || antArgs.endsWith(" "))) { antArgs += " "; } antArgs += "-" + ANT_ARG + Joiner.on(" -" + ANT_ARG).join(supplementalAntArgs); conf.setAntArgs(antArgs); } ExecutionContextProvider executionContextProvider = null; ExecutionContext executionContext = null; int exitCode = 0; try { executionContextProvider = executionContextConfiguration .getExecutionContextProvider(); executionContext = executionContextProvider.createExecutionContext(); LocalCommandFactory localCommandFactory = new LocalCommandFactory(LOG); PTest ptest = new PTest(conf, executionContext, buildTag, logDir, localCommandFactory, new SSHCommandExecutor(LOG), new RSyncCommandExecutor(LOG, 10, localCommandFactory), LOG); exitCode = ptest.run(); } finally { if(executionContext != null) { executionContext.terminate(); } if(executionContextProvider != null) { executionContextProvider.close(); } } System.exit(exitCode); } }
[ "\"BUILD_TAG\"", "\"BUILD_TAG\"" ]
[]
[ "BUILD_TAG" ]
[]
["BUILD_TAG"]
java
1
0
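Illustrative note, not part of the dataset row above: PTest.main reads BUILD_TAG and falls back to a generated "undefined-<millis>" value when the variable is unset. A rough Python re-expression of that read-with-fallback pattern (the Java original uses System.getenv; the helper name below is invented for illustration):

import os
import time

def resolve_build_tag():
    # Same shape as the Java original: use BUILD_TAG when present, otherwise
    # synthesise "undefined-<current millis>".
    tag = os.environ.get("BUILD_TAG")
    return tag if tag is not None else "undefined-%d" % int(time.time() * 1000)

print(resolve_build_tag())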
hw_asr/utils/parse_config.py
import json import logging import os from datetime import datetime from functools import reduce, partial from operator import getitem from pathlib import Path from hw_asr.logger import setup_logging from hw_asr.utils import read_json, write_json, ROOT_PATH class ConfigParser: def __init__(self, config, resume=None, modification=None, run_id=None): """ class to parse configuration json file. Handles hyperparameters for training, initializations of modules, checkpoint saving and logging module. :param config: Dict containing configurations, hyperparameters for training. contents of `config.json` file for example. :param resume: String, path to the checkpoint being loaded. :param modification: Dict keychain:value, specifying position values to be replaced from config dict. :param run_id: Unique Identifier for training processes. Used to save checkpoints and training log. Timestamp is being used as default """ # load config file and apply modification self._config = _update_config(config, modification) self.resume = resume # set save_dir where trained model and log will be saved. save_dir = Path(self.config["trainer"]["save_dir"]) exper_name = self.config["name"] if run_id is None: # use timestamp as default run-id run_id = datetime.now().strftime(r"%m%d_%H%M%S") self._save_dir = save_dir / "models" / exper_name / run_id self._log_dir = save_dir / "log" / exper_name / run_id # make directory for saving checkpoints and log. exist_ok = run_id == "" self.save_dir.mkdir(parents=True, exist_ok=exist_ok) self.log_dir.mkdir(parents=True, exist_ok=exist_ok) # save updated config file to the checkpoint dir write_json(self.config, self.save_dir / "config.json") # configure logging module setup_logging(self.log_dir) self.log_levels = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG} @classmethod def from_args(cls, args, options=""): """ Initialize this class from some cli arguments. Used in train, test. """ for opt in options: args.add_argument(*opt.flags, default=None, type=opt.type) if not isinstance(args, tuple): args = args.parse_args() if args.device is not None: os.environ["CUDA_VISIBLE_DEVICES"] = args.device if args.resume is not None: resume = Path(args.resume) cfg_fname = resume.parent / "config.json" else: msg_no_cfg = "Configuration file need to be specified. Add '-c config.json', for example." assert args.config is not None, msg_no_cfg resume = None cfg_fname = Path(args.config) config = read_json(cfg_fname) if args.config and resume: # update new config for fine-tuning config.update(read_json(args.config)) # parse custom cli options into dictionary modification = { opt.target: getattr(args, _get_opt_name(opt.flags)) for opt in options } return cls(config, resume, modification) def init_obj(self, obj_dict, module, *args, **kwargs): """ Finds a function handle with the name given as 'type' in config, and returns the instance initialized with corresponding arguments given. 
`object = config.init_obj(config['param'], module, a, b=1)` is equivalent to `object = module.name(a, b=1)` """ module_name = obj_dict["type"] module_args = dict(obj_dict["args"]) #print(module_args) #print([k not in module_args for k in kwargs]) assert all( [k not in module_args for k in kwargs] ), "Overwriting kwargs given in config file is not allowed" module_args.update(kwargs) return getattr(module, module_name)(*args, **module_args) def init_ftn(self, name, module, *args, **kwargs): """ Finds a function handle with the name given as 'type' in config, and returns the function with given arguments fixed with functools.partial. `function = config.init_ftn('name', module, a, b=1)` is equivalent to `function = lambda *args, **kwargs: module.name(a, *args, b=1, **kwargs)`. """ module_name = self[name]["type"] module_args = dict(self[name]["args"]) assert all( [k not in module_args for k in kwargs] ), "Overwriting kwargs given in config file is not allowed" module_args.update(kwargs) return partial(getattr(module, module_name), *args, **module_args) def __getitem__(self, name): """Access items like ordinary dict.""" return self.config[name] def get_logger(self, name, verbosity=2): msg_verbosity = "verbosity option {} is invalid. Valid options are {}.".format( verbosity, self.log_levels.keys() ) assert verbosity in self.log_levels, msg_verbosity logger = logging.getLogger(name) logger.setLevel(self.log_levels[verbosity]) return logger # setting read-only attributes @property def config(self): return self._config @property def save_dir(self): return self._save_dir @property def log_dir(self): return self._log_dir @classmethod def get_default_configs(cls): config_path = ROOT_PATH / 'hw_asr' / 'config.json' with config_path.open() as f: return cls(json.load(f)) # helper functions to update config dict with custom cli options def _update_config(config, modification): if modification is None: return config for k, v in modification.items(): if v is not None: _set_by_path(config, k, v) return config def _get_opt_name(flags): for flg in flags: if flg.startswith("--"): return flg.replace("--", "") return flags[0].replace("--", "") def _set_by_path(tree, keys, value): """Set a value in a nested object in tree by sequence of keys.""" keys = keys.split(";") _get_by_path(tree, keys[:-1])[keys[-1]] = value def _get_by_path(tree, keys): """Access a nested object in tree by sequence of keys.""" return reduce(getitem, keys, tree)
[]
[]
[ "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_VISIBLE_DEVICES"]
python
1
0
.history/discordbot_20210613232210.py
import discord from discord.ext import commands from discord.ext.commands import Bot import os import traceback bot = commands.Bot(command_prefix='/') token = os.environ['DISCORD_BOT_TOKEN'] @bot.event async def on_command_error(ctx, error): orig_error = getattr(error, "original", error) error_msg = ''.join(traceback.TracebackException.from_exception(orig_error).format()) await ctx.send(error_msg) @bot.command() async def csm(ctx): embed = discord.Embed(title="choco stupid mountain",description="",url = "https://clips.twitch.tv/GoodReliableArmadilloDoggo-QAW30SL4Rrgfkdrl",color=0xff0000) embed.add_field(name="choco stupid mountain(long_ver)",value="") await ctx.send(embed=embed) bot.run(token)
[]
[]
[ "DISCORD_BOT_TOKEN" ]
[]
["DISCORD_BOT_TOKEN"]
python
1
0
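Illustrative note, not part of the dataset row above: os.environ['DISCORD_BOT_TOKEN'] in the record is a required read, so it raises KeyError when the variable is missing. A small sketch contrasting the two standard-library access styles (the exit message is illustrative):

import os

# Subscript access, as in the row above: fails fast with KeyError if the token is unset.
try:
    token = os.environ["DISCORD_BOT_TOKEN"]
except KeyError:
    raise SystemExit("DISCORD_BOT_TOKEN is not set")

# Optional access: returns a default instead of raising.
maybe_token = os.getenv("DISCORD_BOT_TOKEN", "")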
src/python/bcc/__init__.py
# Copyright 2015 PLUMgrid # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import atexit import ctypes as ct import fcntl import json import os import re import struct import errno import sys from .libbcc import lib, bcc_symbol, bcc_symbol_option, bcc_stacktrace_build_id, _SYM_CB_TYPE from .table import Table, PerfEventArray from .perf import Perf from .utils import get_online_cpus, printb, _assert_is_bytes, ArgString from .version import __version__ from .disassembler import disassemble_prog, decode_map try: basestring except NameError: # Python 3 basestring = str _probe_limit = 1000 _num_open_probes = 0 # for tests def _get_num_open_probes(): global _num_open_probes return _num_open_probes TRACEFS = "/sys/kernel/debug/tracing" # Debug flags # Debug output compiled LLVM IR. DEBUG_LLVM_IR = 0x1 # Debug output loaded BPF bytecode and register state on branches. DEBUG_BPF = 0x2 # Debug output pre-processor result. DEBUG_PREPROCESSOR = 0x4 # Debug output ASM instructions embedded with source. DEBUG_SOURCE = 0x8 # Debug output register state on all instructions in addition to DEBUG_BPF. DEBUG_BPF_REGISTER_STATE = 0x10 # Debug BTF. DEBUG_BTF = 0x20 class SymbolCache(object): def __init__(self, pid): self.cache = lib.bcc_symcache_new( pid, ct.cast(None, ct.POINTER(bcc_symbol_option))) def resolve(self, addr, demangle): """ Return a tuple of the symbol (function), its offset from the beginning of the function, and the module in which it lies. For example: ("start_thread", 0x202, "/usr/lib/.../libpthread-2.24.so") If the symbol cannot be found but we know which module it is in, return the module name and the offset from the beginning of the module. If we don't even know the module, return the absolute address as the offset. 
""" sym = bcc_symbol() if demangle: res = lib.bcc_symcache_resolve(self.cache, addr, ct.byref(sym)) else: res = lib.bcc_symcache_resolve_no_demangle(self.cache, addr, ct.byref(sym)) if res < 0: if sym.module and sym.offset: return (None, sym.offset, ct.cast(sym.module, ct.c_char_p).value) return (None, addr, None) if demangle: name_res = sym.demangle_name lib.bcc_symbol_free_demangle_name(ct.byref(sym)) else: name_res = sym.name return (name_res, sym.offset, ct.cast(sym.module, ct.c_char_p).value) def resolve_name(self, module, name): module = _assert_is_bytes(module) name = _assert_is_bytes(name) addr = ct.c_ulonglong() if lib.bcc_symcache_resolve_name(self.cache, module, name, ct.byref(addr)) < 0: return -1 return addr.value class PerfType: # From perf_type_id in uapi/linux/perf_event.h HARDWARE = 0 SOFTWARE = 1 class PerfHWConfig: # From perf_hw_id in uapi/linux/perf_event.h CPU_CYCLES = 0 INSTRUCTIONS = 1 CACHE_REFERENCES = 2 CACHE_MISSES = 3 BRANCH_INSTRUCTIONS = 4 BRANCH_MISSES = 5 BUS_CYCLES = 6 STALLED_CYCLES_FRONTEND = 7 STALLED_CYCLES_BACKEND = 8 REF_CPU_CYCLES = 9 class PerfSWConfig: # From perf_sw_id in uapi/linux/perf_event.h CPU_CLOCK = 0 TASK_CLOCK = 1 PAGE_FAULTS = 2 CONTEXT_SWITCHES = 3 CPU_MIGRATIONS = 4 PAGE_FAULTS_MIN = 5 PAGE_FAULTS_MAJ = 6 ALIGNMENT_FAULTS = 7 EMULATION_FAULTS = 8 DUMMY = 9 BPF_OUTPUT = 10 class BPF(object): # From bpf_prog_type in uapi/linux/bpf.h SOCKET_FILTER = 1 KPROBE = 2 SCHED_CLS = 3 SCHED_ACT = 4 TRACEPOINT = 5 XDP = 6 PERF_EVENT = 7 CGROUP_SKB = 8 CGROUP_SOCK = 9 LWT_IN = 10 LWT_OUT = 11 LWT_XMIT = 12 SOCK_OPS = 13 SK_SKB = 14 CGROUP_DEVICE = 15 SK_MSG = 16 RAW_TRACEPOINT = 17 CGROUP_SOCK_ADDR = 18 # from xdp_action uapi/linux/bpf.h XDP_ABORTED = 0 XDP_DROP = 1 XDP_PASS = 2 XDP_TX = 3 XDP_REDIRECT = 4 _probe_repl = re.compile(b"[^a-zA-Z0-9_]") _sym_caches = {} _bsymcache = lib.bcc_buildsymcache_new() _auto_includes = { "linux/time.h": ["time"], "linux/fs.h": ["fs", "file"], "linux/blkdev.h": ["bio", "request"], "linux/slab.h": ["alloc"], "linux/netdevice.h": ["sk_buff", "net_device"] } _syscall_prefixes = [ b"sys_", b"__x64_sys_", b"__x32_compat_sys_", b"__ia32_compat_sys_", b"__arm64_sys_", ] # BPF timestamps come from the monotonic clock. To be able to filter # and compare them from Python, we need to invoke clock_gettime. # Adapted from http://stackoverflow.com/a/1205762 CLOCK_MONOTONIC = 1 # see <linux/time.h> class timespec(ct.Structure): _fields_ = [('tv_sec', ct.c_long), ('tv_nsec', ct.c_long)] _librt = ct.CDLL('librt.so.1', use_errno=True) _clock_gettime = _librt.clock_gettime _clock_gettime.argtypes = [ct.c_int, ct.POINTER(timespec)] @classmethod def monotonic_time(cls): """monotonic_time() Returns the system monotonic time from clock_gettime, using the CLOCK_MONOTONIC constant. The time returned is in nanoseconds. """ t = cls.timespec() if cls._clock_gettime(cls.CLOCK_MONOTONIC, ct.byref(t)) != 0: errno = ct.get_errno() raise OSError(errno, os.strerror(errno)) return t.tv_sec * 1e9 + t.tv_nsec @classmethod def generate_auto_includes(cls, program_words): """ Generates #include statements automatically based on a set of recognized types such as sk_buff and bio. The input is all the words that appear in the BPF program, and the output is a (possibly empty) string of #include statements, such as "#include <linux/fs.h>". 
""" headers = "" for header, keywords in cls._auto_includes.items(): for keyword in keywords: for word in program_words: if keyword in word and header not in headers: headers += "#include <%s>\n" % header return headers # defined for compatibility reasons, to be removed Table = Table class Function(object): def __init__(self, bpf, name, fd): self.bpf = bpf self.name = name self.fd = fd @staticmethod def _find_file(filename): """ If filename is invalid, search in ./ of argv[0] """ if filename: if not os.path.isfile(filename): argv0 = ArgString(sys.argv[0]) t = b"/".join([os.path.abspath(os.path.dirname(argv0.__str__())), filename]) if os.path.isfile(t): filename = t else: raise Exception("Could not find file %s" % filename) return filename @staticmethod def find_exe(bin_path): """ find_exe(bin_path) Traverses the PATH environment variable, looking for the first directory that contains an executable file named bin_path, and returns the full path to that file, or None if no such file can be found. This is meant to replace invocations of the "which" shell utility, which doesn't have portable semantics for skipping aliases. """ # Source: http://stackoverflow.com/a/377028 def is_exe(fpath): return os.path.isfile(fpath) and \ os.access(fpath, os.X_OK) fpath, fname = os.path.split(bin_path) if fpath: if is_exe(bin_path): return bin_path else: for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, bin_path) if is_exe(exe_file): return exe_file return None def __init__(self, src_file=b"", hdr_file=b"", text=None, debug=0, cflags=[], usdt_contexts=[], allow_rlimit=True, device=None): """Create a new BPF module with the given source code. Note: All fields are marked as optional, but either `src_file` or `text` must be supplied, and not both. Args: src_file (Optional[str]): Path to a source file for the module hdr_file (Optional[str]): Path to a helper header file for the `src_file` text (Optional[str]): Contents of a source file for the module debug (Optional[int]): Flags used for debug prints, can be |'d together See "Debug flags" for explanation """ src_file = _assert_is_bytes(src_file) hdr_file = _assert_is_bytes(hdr_file) text = _assert_is_bytes(text) assert not (text and src_file) self.kprobe_fds = {} self.uprobe_fds = {} self.tracepoint_fds = {} self.raw_tracepoint_fds = {} self.perf_buffers = {} self.open_perf_events = {} self.tracefile = None atexit.register(self.cleanup) self.debug = debug self.funcs = {} self.tables = {} self.module = None cflags_array = (ct.c_char_p * len(cflags))() for i, s in enumerate(cflags): cflags_array[i] = bytes(ArgString(s)) if src_file: src_file = BPF._find_file(src_file) hdr_file = BPF._find_file(hdr_file) # files that end in ".b" are treated as B files. Everything else is a (BPF-)C file if src_file.endswith(b".b"): self.module = lib.bpf_module_create_b(src_file, hdr_file, self.debug, device) else: if src_file: # Read the BPF C source file into the text variable. This ensures, # that files and inline text are treated equally. 
with open(src_file, mode="rb") as file: text = file.read() ctx_array = (ct.c_void_p * len(usdt_contexts))() for i, usdt in enumerate(usdt_contexts): ctx_array[i] = ct.c_void_p(usdt.get_context()) usdt_text = lib.bcc_usdt_genargs(ctx_array, len(usdt_contexts)) if usdt_text is None: raise Exception("can't generate USDT probe arguments; " + "possible cause is missing pid when a " + "probe in a shared object has multiple " + "locations") text = usdt_text + text self.module = lib.bpf_module_create_c_from_string(text, self.debug, cflags_array, len(cflags_array), allow_rlimit, device) if not self.module: raise Exception("Failed to compile BPF module %s" % (src_file or "<text>")) for usdt_context in usdt_contexts: usdt_context.attach_uprobes(self) # If any "kprobe__" or "tracepoint__" or "raw_tracepoint__" # prefixed functions were defined, # they will be loaded and attached here. self._trace_autoload() def load_funcs(self, prog_type=KPROBE): """load_funcs(prog_type=KPROBE) Load all functions in this BPF module with the given type. Returns a list of the function handles.""" fns = [] for i in range(0, lib.bpf_num_functions(self.module)): func_name = lib.bpf_function_name(self.module, i) fns.append(self.load_func(func_name, prog_type)) return fns def load_func(self, func_name, prog_type, device = None): func_name = _assert_is_bytes(func_name) if func_name in self.funcs: return self.funcs[func_name] if not lib.bpf_function_start(self.module, func_name): raise Exception("Unknown program %s" % func_name) log_level = 0 if (self.debug & DEBUG_BPF_REGISTER_STATE): log_level = 2 elif (self.debug & DEBUG_BPF): log_level = 1 fd = lib.bcc_func_load(self.module, prog_type, func_name, lib.bpf_function_start(self.module, func_name), lib.bpf_function_size(self.module, func_name), lib.bpf_module_license(self.module), lib.bpf_module_kern_version(self.module), log_level, None, 0, device); if fd < 0: atexit.register(self.donothing) if ct.get_errno() == errno.EPERM: raise Exception("Need super-user privileges to run") errstr = os.strerror(ct.get_errno()) raise Exception("Failed to load BPF program %s: %s" % (func_name, errstr)) fn = BPF.Function(self, func_name, fd) self.funcs[func_name] = fn return fn def dump_func(self, func_name): """ Return the eBPF bytecodes for the specified function as a string """ func_name = _assert_is_bytes(func_name) if not lib.bpf_function_start(self.module, func_name): raise Exception("Unknown program %s" % func_name) start, = lib.bpf_function_start(self.module, func_name), size, = lib.bpf_function_size(self.module, func_name), return ct.string_at(start, size) def disassemble_func(self, func_name): bpfstr = self.dump_func(func_name) return disassemble_prog(func_name, bpfstr) def decode_table(self, table_name, sizeinfo=False): table_obj = self[table_name] table_type = lib.bpf_table_type_id(self.module, table_obj.map_id) return decode_map(table_name, table_obj, table_type, sizeinfo=sizeinfo) str2ctype = { u"_Bool": ct.c_bool, u"char": ct.c_char, u"wchar_t": ct.c_wchar, u"unsigned char": ct.c_ubyte, u"short": ct.c_short, u"unsigned short": ct.c_ushort, u"int": ct.c_int, u"unsigned int": ct.c_uint, u"long": ct.c_long, u"unsigned long": ct.c_ulong, u"long long": ct.c_longlong, u"unsigned long long": ct.c_ulonglong, u"float": ct.c_float, u"double": ct.c_double, u"long double": ct.c_longdouble, u"__int128": ct.c_int64 * 2, u"unsigned __int128": ct.c_uint64 * 2, } @staticmethod def _decode_table_type(desc): if isinstance(desc, basestring): return BPF.str2ctype[desc] anon = [] fields = [] for t in 
desc[1]: if len(t) == 2: fields.append((t[0], BPF._decode_table_type(t[1]))) elif len(t) == 3: if isinstance(t[2], list): fields.append((t[0], BPF._decode_table_type(t[1]) * t[2][0])) elif isinstance(t[2], int): fields.append((t[0], BPF._decode_table_type(t[1]), t[2])) elif isinstance(t[2], basestring) and ( t[2] == u"union" or t[2] == u"struct" or t[2] == u"struct_packed"): name = t[0] if name == "": name = "__anon%d" % len(anon) anon.append(name) fields.append((name, BPF._decode_table_type(t))) else: raise Exception("Failed to decode type %s" % str(t)) else: raise Exception("Failed to decode type %s" % str(t)) base = ct.Structure is_packed = False if len(desc) > 2: if desc[2] == u"union": base = ct.Union elif desc[2] == u"struct": base = ct.Structure elif desc[2] == u"struct_packed": base = ct.Structure is_packed = True if is_packed: cls = type(str(desc[0]), (base,), dict(_anonymous_=anon, _pack_=1, _fields_=fields)) else: cls = type(str(desc[0]), (base,), dict(_anonymous_=anon, _fields_=fields)) return cls def get_table(self, name, keytype=None, leaftype=None, reducer=None): name = _assert_is_bytes(name) map_id = lib.bpf_table_id(self.module, name) map_fd = lib.bpf_table_fd(self.module, name) if map_fd < 0: raise KeyError if not keytype: key_desc = lib.bpf_table_key_desc(self.module, name).decode("utf-8") if not key_desc: raise Exception("Failed to load BPF Table %s key desc" % name) keytype = BPF._decode_table_type(json.loads(key_desc)) if not leaftype: leaf_desc = lib.bpf_table_leaf_desc(self.module, name).decode("utf-8") if not leaf_desc: raise Exception("Failed to load BPF Table %s leaf desc" % name) leaftype = BPF._decode_table_type(json.loads(leaf_desc)) return Table(self, map_id, map_fd, keytype, leaftype, name, reducer=reducer) def __getitem__(self, key): if key not in self.tables: self.tables[key] = self.get_table(key) return self.tables[key] def __setitem__(self, key, leaf): self.tables[key] = leaf def __len__(self): return len(self.tables) def __delitem__(self, key): del self.tables[key] def __iter__(self): return self.tables.__iter__() @staticmethod def attach_raw_socket(fn, dev): dev = _assert_is_bytes(dev) if not isinstance(fn, BPF.Function): raise Exception("arg 1 must be of type BPF.Function") sock = lib.bpf_open_raw_sock(dev) if sock < 0: errstr = os.strerror(ct.get_errno()) raise Exception("Failed to open raw device %s: %s" % (dev, errstr)) res = lib.bpf_attach_socket(sock, fn.fd) if res < 0: errstr = os.strerror(ct.get_errno()) raise Exception("Failed to attach BPF to device %s: %s" % (dev, errstr)) fn.sock = sock @staticmethod def get_kprobe_functions(event_re): with open("%s/../kprobes/blacklist" % TRACEFS, "rb") as blacklist_f: blacklist = set([line.rstrip().split()[1] for line in blacklist_f]) fns = [] in_init_section = 0 in_irq_section = 0 with open("/proc/kallsyms", "rb") as avail_file: for line in avail_file: (t, fn) = line.rstrip().split()[1:3] # Skip all functions defined between __init_begin and # __init_end if in_init_section == 0: if fn == b'__init_begin': in_init_section = 1 continue elif in_init_section == 1: if fn == b'__init_end': in_init_section = 2 continue # Skip all functions defined between __irqentry_text_start and # __irqentry_text_end if in_irq_section == 0: if fn == b'__irqentry_text_start': in_irq_section = 1 continue elif in_irq_section == 1: if fn == b'__irqentry_text_end': in_irq_section = 2 continue # All functions defined as NOKPROBE_SYMBOL() start with the # prefix _kbl_addr_*, blacklisting them by looking at the name # allows to catch 
also those symbols that are defined in kernel # modules. if fn.startswith(b'_kbl_addr_'): continue # Explicitly blacklist perf-related functions, they are all # non-attachable. elif fn.startswith(b'__perf') or fn.startswith(b'perf_'): continue # Exclude all gcc 8's extra .cold functions elif re.match(b'^.*\.cold\.\d+$', fn): continue if (t.lower() in [b't', b'w']) and re.match(event_re, fn) \ and fn not in blacklist: fns.append(fn) return set(fns) # Some functions may appear more than once def _check_probe_quota(self, num_new_probes): global _num_open_probes if _num_open_probes + num_new_probes > _probe_limit: raise Exception("Number of open probes would exceed global quota") def _add_kprobe_fd(self, name, fd): global _num_open_probes self.kprobe_fds[name] = fd _num_open_probes += 1 def _del_kprobe_fd(self, name): global _num_open_probes del self.kprobe_fds[name] _num_open_probes -= 1 def _add_uprobe_fd(self, name, fd): global _num_open_probes self.uprobe_fds[name] = fd _num_open_probes += 1 def _del_uprobe_fd(self, name): global _num_open_probes del self.uprobe_fds[name] _num_open_probes -= 1 # Find current system's syscall prefix by testing on the BPF syscall. # If no valid value found, will return the first possible value which # would probably lead to error in later API calls. def get_syscall_prefix(self): for prefix in self._syscall_prefixes: if self.ksymname(b"%sbpf" % prefix) != -1: return prefix return self._syscall_prefixes[0] # Given a syscall's name, return the full Kernel function name with current # system's syscall prefix. For example, given "clone" the helper would # return "sys_clone" or "__x64_sys_clone". def get_syscall_fnname(self, name): name = _assert_is_bytes(name) return self.get_syscall_prefix() + name # Given a Kernel function name that represents a syscall but already has a # prefix included, transform it to current system's prefix. For example, # if "sys_clone" provided, the helper may translate it to "__x64_sys_clone". 
def fix_syscall_fnname(self, name): name = _assert_is_bytes(name) for prefix in self._syscall_prefixes: if name.startswith(prefix): return self.get_syscall_fnname(name[len(prefix):]) return name def attach_kprobe(self, event=b"", event_off=0, fn_name=b"", event_re=b""): event = _assert_is_bytes(event) fn_name = _assert_is_bytes(fn_name) event_re = _assert_is_bytes(event_re) # allow the caller to glob multiple functions together if event_re: matches = BPF.get_kprobe_functions(event_re) self._check_probe_quota(len(matches)) for line in matches: try: self.attach_kprobe(event=line, fn_name=fn_name) except: pass return self._check_probe_quota(1) fn = self.load_func(fn_name, BPF.KPROBE) ev_name = b"p_" + event.replace(b"+", b"_").replace(b".", b"_") fd = lib.bpf_attach_kprobe(fn.fd, 0, ev_name, event, event_off, 0) if fd < 0: raise Exception("Failed to attach BPF program %s to kprobe %s" % (fn_name, event)) self._add_kprobe_fd(ev_name, fd) return self def attach_kretprobe(self, event=b"", fn_name=b"", event_re=b"", maxactive=0): event = _assert_is_bytes(event) fn_name = _assert_is_bytes(fn_name) event_re = _assert_is_bytes(event_re) # allow the caller to glob multiple functions together if event_re: for line in BPF.get_kprobe_functions(event_re): try: self.attach_kretprobe(event=line, fn_name=fn_name, maxactive=maxactive) except: pass return self._check_probe_quota(1) fn = self.load_func(fn_name, BPF.KPROBE) ev_name = b"r_" + event.replace(b"+", b"_").replace(b".", b"_") fd = lib.bpf_attach_kprobe(fn.fd, 1, ev_name, event, 0, maxactive) if fd < 0: raise Exception("Failed to attach BPF program %s to kretprobe %s" % (fn_name, event)) self._add_kprobe_fd(ev_name, fd) return self def detach_kprobe_event(self, ev_name): if ev_name not in self.kprobe_fds: raise Exception("Kprobe %s is not attached" % ev_name) res = lib.bpf_close_perf_event_fd(self.kprobe_fds[ev_name]) if res < 0: raise Exception("Failed to close kprobe FD") res = lib.bpf_detach_kprobe(ev_name) if res < 0: raise Exception("Failed to detach BPF from kprobe") self._del_kprobe_fd(ev_name) def detach_kprobe(self, event): event = _assert_is_bytes(event) ev_name = b"p_" + event.replace(b"+", b"_").replace(b".", b"_") self.detach_kprobe_event(ev_name) def detach_kretprobe(self, event): event = _assert_is_bytes(event) ev_name = b"r_" + event.replace(b"+", b"_").replace(b".", b"_") self.detach_kprobe_event(ev_name) @staticmethod def attach_xdp(dev, fn, flags=0): ''' This function attaches a BPF function to a device on the device driver level (XDP) ''' dev = _assert_is_bytes(dev) if not isinstance(fn, BPF.Function): raise Exception("arg 1 must be of type BPF.Function") res = lib.bpf_attach_xdp(dev, fn.fd, flags) if res < 0: err_no = ct.get_errno() if err_no == errno.EBADMSG: raise Exception("Internal error while attaching BPF to device,"+ " try increasing the debug level!") else: errstr = os.strerror(err_no) raise Exception("Failed to attach BPF to device %s: %s" % (dev, errstr)) @staticmethod def remove_xdp(dev, flags=0): ''' This function removes any BPF function from a device on the device driver level (XDP) ''' dev = _assert_is_bytes(dev) res = lib.bpf_attach_xdp(dev, -1, flags) if res < 0: errstr = os.strerror(ct.get_errno()) raise Exception("Failed to detach BPF from device %s: %s" % (dev, errstr)) @classmethod def _check_path_symbol(cls, module, symname, addr, pid): module = _assert_is_bytes(module) symname = _assert_is_bytes(symname) sym = bcc_symbol() c_pid = 0 if pid == -1 else pid if lib.bcc_resolve_symname( module, symname, addr or 
0x0, c_pid, ct.cast(None, ct.POINTER(bcc_symbol_option)), ct.byref(sym), ) < 0: raise Exception("could not determine address of symbol %s" % symname) module_path = ct.cast(sym.module, ct.c_char_p).value lib.bcc_procutils_free(sym.module) return module_path, sym.offset @staticmethod def find_library(libname): libname = _assert_is_bytes(libname) res = lib.bcc_procutils_which_so(libname, 0) if not res: return None libpath = ct.cast(res, ct.c_char_p).value lib.bcc_procutils_free(res) return libpath @staticmethod def get_tracepoints(tp_re): results = [] events_dir = os.path.join(TRACEFS, "events") for category in os.listdir(events_dir): cat_dir = os.path.join(events_dir, category) if not os.path.isdir(cat_dir): continue for event in os.listdir(cat_dir): evt_dir = os.path.join(cat_dir, event) if os.path.isdir(evt_dir): tp = ("%s:%s" % (category, event)) if re.match(tp_re.decode(), tp): results.append(tp) return results @staticmethod def tracepoint_exists(category, event): evt_dir = os.path.join(TRACEFS, "events", category, event) return os.path.isdir(evt_dir) def attach_tracepoint(self, tp=b"", tp_re=b"", fn_name=b""): """attach_tracepoint(tp="", tp_re="", fn_name="") Run the bpf function denoted by fn_name every time the kernel tracepoint specified by 'tp' is hit. The optional parameters pid, cpu, and group_fd can be used to filter the probe. The tracepoint specification is simply the tracepoint category and the tracepoint name, separated by a colon. For example: sched:sched_switch, syscalls:sys_enter_bind, etc. Instead of a tracepoint name, a regular expression can be provided in tp_re. The program will then attach to tracepoints that match the provided regular expression. To obtain a list of kernel tracepoints, use the tplist tool or cat the file /sys/kernel/debug/tracing/available_events. Examples: BPF(text).attach_tracepoint(tp="sched:sched_switch", fn_name="on_switch") BPF(text).attach_tracepoint(tp_re="sched:.*", fn_name="on_switch") """ tp = _assert_is_bytes(tp) tp_re = _assert_is_bytes(tp_re) fn_name = _assert_is_bytes(fn_name) if tp_re: for tp in BPF.get_tracepoints(tp_re): self.attach_tracepoint(tp=tp, fn_name=fn_name) return fn = self.load_func(fn_name, BPF.TRACEPOINT) (tp_category, tp_name) = tp.split(b':') fd = lib.bpf_attach_tracepoint(fn.fd, tp_category, tp_name) if fd < 0: raise Exception("Failed to attach BPF program %s to tracepoint %s" % (fn_name, tp)) self.tracepoint_fds[tp] = fd return self def attach_raw_tracepoint(self, tp=b"", fn_name=b""): """attach_raw_tracepoint(self, tp=b"", fn_name=b"") Run the bpf function denoted by fn_name every time the kernel tracepoint specified by 'tp' is hit. The bpf function should be loaded as a RAW_TRACEPOINT type. The fn_name is the kernel tracepoint name, e.g., sched_switch, sys_enter_bind, etc. Examples: BPF(text).attach_raw_tracepoint(tp="sched_switch", fn_name="on_switch") """ tp = _assert_is_bytes(tp) if tp in self.raw_tracepoint_fds: raise Exception("Raw tracepoint %s has been attached" % tp) fn_name = _assert_is_bytes(fn_name) fn = self.load_func(fn_name, BPF.RAW_TRACEPOINT) fd = lib.bpf_attach_raw_tracepoint(fn.fd, tp) if fd < 0: raise Exception("Failed to attach BPF to raw tracepoint") self.raw_tracepoint_fds[tp] = fd; return self def detach_raw_tracepoint(self, tp=b""): """detach_raw_tracepoint(tp="") Stop running the bpf function that is attached to the kernel tracepoint specified by 'tp'. 
Example: bpf.detach_raw_tracepoint("sched_switch") """ tp = _assert_is_bytes(tp) if tp not in self.raw_tracepoint_fds: raise Exception("Raw tracepoint %s is not attached" % tp) os.close(self.raw_tracepoint_fds[tp]) del self.raw_tracepoint_fds[tp] @staticmethod def support_raw_tracepoint(): # kernel symbol "bpf_find_raw_tracepoint" indicates raw_tracepint support if BPF.ksymname("bpf_find_raw_tracepoint") != -1 or \ BPF.ksymname("bpf_get_raw_tracepoint") != -1: return True return False def detach_tracepoint(self, tp=b""): """detach_tracepoint(tp="") Stop running a bpf function that is attached to the kernel tracepoint specified by 'tp'. Example: bpf.detach_tracepoint("sched:sched_switch") """ tp = _assert_is_bytes(tp) if tp not in self.tracepoint_fds: raise Exception("Tracepoint %s is not attached" % tp) res = lib.bpf_close_perf_event_fd(self.tracepoint_fds[tp]) if res < 0: raise Exception("Failed to detach BPF from tracepoint") (tp_category, tp_name) = tp.split(b':') res = lib.bpf_detach_tracepoint(tp_category, tp_name) if res < 0: raise Exception("Failed to detach BPF from tracepoint") del self.tracepoint_fds[tp] def _attach_perf_event(self, progfd, ev_type, ev_config, sample_period, sample_freq, pid, cpu, group_fd): res = lib.bpf_attach_perf_event(progfd, ev_type, ev_config, sample_period, sample_freq, pid, cpu, group_fd) if res < 0: raise Exception("Failed to attach BPF to perf event") return res def attach_perf_event(self, ev_type=-1, ev_config=-1, fn_name=b"", sample_period=0, sample_freq=0, pid=-1, cpu=-1, group_fd=-1): fn_name = _assert_is_bytes(fn_name) fn = self.load_func(fn_name, BPF.PERF_EVENT) res = {} if cpu >= 0: res[cpu] = self._attach_perf_event(fn.fd, ev_type, ev_config, sample_period, sample_freq, pid, cpu, group_fd) else: for i in get_online_cpus(): res[i] = self._attach_perf_event(fn.fd, ev_type, ev_config, sample_period, sample_freq, pid, i, group_fd) self.open_perf_events[(ev_type, ev_config)] = res def detach_perf_event(self, ev_type=-1, ev_config=-1): try: fds = self.open_perf_events[(ev_type, ev_config)] except KeyError: raise Exception("Perf event type {} config {} not attached".format( ev_type, ev_config)) res = 0 for fd in fds.values(): res = lib.bpf_close_perf_event_fd(fd) or res if res != 0: raise Exception("Failed to detach BPF from perf event") del self.open_perf_events[(ev_type, ev_config)] @staticmethod def get_user_functions(name, sym_re): return set([name for (name, _) in BPF.get_user_functions_and_addresses(name, sym_re)]) @staticmethod def get_user_addresses(name, sym_re): """ We are returning addresses here instead of symbol names because it turns out that the same name may appear multiple times with different addresses, and the same address may appear multiple times with the same name. We can't attach a uprobe to the same address more than once, so it makes sense to return the unique set of addresses that are mapped to a symbol that matches the provided regular expression. 
""" return set([address for (_, address) in BPF.get_user_functions_and_addresses(name, sym_re)]) @staticmethod def get_user_functions_and_addresses(name, sym_re): name = _assert_is_bytes(name) sym_re = _assert_is_bytes(sym_re) addresses = [] def sym_cb(sym_name, addr): dname = sym_name if re.match(sym_re, dname): addresses.append((dname, addr)) return 0 res = lib.bcc_foreach_function_symbol(name, _SYM_CB_TYPE(sym_cb)) if res < 0: raise Exception("Error %d enumerating symbols in %s" % (res, name)) return addresses def _get_uprobe_evname(self, prefix, path, addr, pid): if pid == -1: return b"%s_%s_0x%x" % (prefix, self._probe_repl.sub(b"_", path), addr) else: # if pid is valid, put pid in the name, so different pid # can have different event names return b"%s_%s_0x%x_%d" % (prefix, self._probe_repl.sub(b"_", path), addr, pid) def attach_uprobe(self, name=b"", sym=b"", sym_re=b"", addr=None, fn_name=b"", pid=-1): """attach_uprobe(name="", sym="", sym_re="", addr=None, fn_name="" pid=-1) Run the bpf function denoted by fn_name every time the symbol sym in the library or binary 'name' is encountered. Optional parameters pid, cpu, and group_fd can be used to filter the probe. The real address addr may be supplied in place of sym, in which case sym must be set to its default value. If the file is a non-PIE executable, addr must be a virtual address, otherwise it must be an offset relative to the file load address. Instead of a symbol name, a regular expression can be provided in sym_re. The uprobe will then attach to symbols that match the provided regular expression. Libraries can be given in the name argument without the lib prefix, or with the full path (/usr/lib/...). Binaries can be given only with the full path (/bin/sh). If a PID is given, the uprobe will attach to the version of the library used by the process. Example: BPF(text).attach_uprobe("c", "malloc") BPF(text).attach_uprobe("/usr/bin/python", "main") """ name = _assert_is_bytes(name) sym = _assert_is_bytes(sym) sym_re = _assert_is_bytes(sym_re) fn_name = _assert_is_bytes(fn_name) if sym_re: addresses = BPF.get_user_addresses(name, sym_re) self._check_probe_quota(len(addresses)) for sym_addr in addresses: self.attach_uprobe(name=name, addr=sym_addr, fn_name=fn_name, pid=pid) return (path, addr) = BPF._check_path_symbol(name, sym, addr, pid) self._check_probe_quota(1) fn = self.load_func(fn_name, BPF.KPROBE) ev_name = self._get_uprobe_evname(b"p", path, addr, pid) fd = lib.bpf_attach_uprobe(fn.fd, 0, ev_name, path, addr, pid) if fd < 0: raise Exception("Failed to attach BPF to uprobe") self._add_uprobe_fd(ev_name, fd) return self def attach_uretprobe(self, name=b"", sym=b"", sym_re=b"", addr=None, fn_name=b"", pid=-1): """attach_uretprobe(name="", sym="", sym_re="", addr=None, fn_name="" pid=-1) Run the bpf function denoted by fn_name every time the symbol sym in the library or binary 'name' finishes execution. See attach_uprobe for meaning of additional parameters. 
""" name = _assert_is_bytes(name) sym = _assert_is_bytes(sym) sym_re = _assert_is_bytes(sym_re) fn_name = _assert_is_bytes(fn_name) if sym_re: for sym_addr in BPF.get_user_addresses(name, sym_re): self.attach_uretprobe(name=name, addr=sym_addr, fn_name=fn_name, pid=pid) return (path, addr) = BPF._check_path_symbol(name, sym, addr, pid) self._check_probe_quota(1) fn = self.load_func(fn_name, BPF.KPROBE) ev_name = self._get_uprobe_evname(b"r", path, addr, pid) fd = lib.bpf_attach_uprobe(fn.fd, 1, ev_name, path, addr, pid) if fd < 0: raise Exception("Failed to attach BPF to uretprobe") self._add_uprobe_fd(ev_name, fd) return self def detach_uprobe_event(self, ev_name): if ev_name not in self.uprobe_fds: raise Exception("Uprobe %s is not attached" % ev_name) res = lib.bpf_close_perf_event_fd(self.uprobe_fds[ev_name]) if res < 0: raise Exception("Failed to detach BPF from uprobe") res = lib.bpf_detach_uprobe(ev_name) if res < 0: raise Exception("Failed to detach BPF from uprobe") self._del_uprobe_fd(ev_name) def detach_uprobe(self, name=b"", sym=b"", addr=None, pid=-1): """detach_uprobe(name="", sym="", addr=None, pid=-1) Stop running a bpf function that is attached to symbol 'sym' in library or binary 'name'. """ name = _assert_is_bytes(name) sym = _assert_is_bytes(sym) (path, addr) = BPF._check_path_symbol(name, sym, addr, pid) ev_name = self._get_uprobe_evname(b"p", path, addr, pid) self.detach_uprobe_event(ev_name) def detach_uretprobe(self, name=b"", sym=b"", addr=None, pid=-1): """detach_uretprobe(name="", sym="", addr=None, pid=-1) Stop running a bpf function that is attached to symbol 'sym' in library or binary 'name'. """ name = _assert_is_bytes(name) sym = _assert_is_bytes(sym) (path, addr) = BPF._check_path_symbol(name, sym, addr, pid) ev_name = self._get_uprobe_evname(b"r", path, addr, pid) self.detach_uprobe_event(ev_name) def _trace_autoload(self): for i in range(0, lib.bpf_num_functions(self.module)): func_name = lib.bpf_function_name(self.module, i) if func_name.startswith(b"kprobe__"): fn = self.load_func(func_name, BPF.KPROBE) self.attach_kprobe( event=self.fix_syscall_fnname(func_name[8:]), fn_name=fn.name) elif func_name.startswith(b"kretprobe__"): fn = self.load_func(func_name, BPF.KPROBE) self.attach_kretprobe( event=self.fix_syscall_fnname(func_name[11:]), fn_name=fn.name) elif func_name.startswith(b"tracepoint__"): fn = self.load_func(func_name, BPF.TRACEPOINT) tp = fn.name[len(b"tracepoint__"):].replace(b"__", b":") self.attach_tracepoint(tp=tp, fn_name=fn.name) elif func_name.startswith(b"raw_tracepoint__"): fn = self.load_func(func_name, BPF.RAW_TRACEPOINT) tp = fn.name[len(b"raw_tracepoint__"):] self.attach_raw_tracepoint(tp=tp, fn_name=fn.name) def trace_open(self, nonblocking=False): """trace_open(nonblocking=False) Open the trace_pipe if not already open """ if not self.tracefile: self.tracefile = open("%s/trace_pipe" % TRACEFS, "rb") if nonblocking: fd = self.tracefile.fileno() fl = fcntl.fcntl(fd, fcntl.F_GETFL) fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK) return self.tracefile def trace_fields(self, nonblocking=False): """trace_fields(nonblocking=False) Read from the kernel debug trace pipe and return a tuple of the fields (task, pid, cpu, flags, timestamp, msg) or None if no line was read (nonblocking=True) """ while True: line = self.trace_readline(nonblocking) if not line and nonblocking: return (None,) * 6 # don't print messages related to lost events if line.startswith(b"CPU:"): continue task = line[:16].lstrip() line = line[17:] ts_end = 
line.find(b":") pid, cpu, flags, ts = line[:ts_end].split() cpu = cpu[1:-1] # line[ts_end:] will have ": [sym_or_addr]: msgs" # For trace_pipe debug output, the addr typically # is invalid (e.g., 0x1). For kernel 4.12 or earlier, # if address is not able to match a kernel symbol, # nothing will be printed out. For kernel 4.13 and later, # however, the illegal address will be printed out. # Hence, both cases are handled here. line = line[ts_end + 1:] sym_end = line.find(b":") msg = line[sym_end + 2:] return (task, int(pid), int(cpu), flags, float(ts), msg) def trace_readline(self, nonblocking=False): """trace_readline(nonblocking=False) Read from the kernel debug trace pipe and return one line If nonblocking is False, this will block until ctrl-C is pressed. """ trace = self.trace_open(nonblocking) line = None try: line = trace.readline(1024).rstrip() except IOError: pass return line def trace_print(self, fmt=None): """trace_print(self, fmt=None) Read from the kernel debug trace pipe and print on stdout. If fmt is specified, apply as a format string to the output. See trace_fields for the members of the tuple example: trace_print(fmt="pid {1}, msg = {5}") """ while True: if fmt: fields = self.trace_fields(nonblocking=False) if not fields: continue line = fmt.format(*fields) else: line = self.trace_readline(nonblocking=False) print(line) sys.stdout.flush() @staticmethod def _sym_cache(pid): """_sym_cache(pid) Returns a symbol cache for the specified PID. The kernel symbol cache is accessed by providing any PID less than zero. """ if pid < 0 and pid != -1: pid = -1 if not pid in BPF._sym_caches: BPF._sym_caches[pid] = SymbolCache(pid) return BPF._sym_caches[pid] @staticmethod def sym(addr, pid, show_module=False, show_offset=False, demangle=True): """sym(addr, pid, show_module=False, show_offset=False) Translate a memory address into a function name for a pid, which is returned. When show_module is True, the module name is also included. When show_offset is True, the instruction offset as a hexadecimal number is also included in the string. A pid of less than zero will access the kernel symbol cache. Example output when both show_module and show_offset are True: "start_thread+0x202 [libpthread-2.24.so]" Example output when both show_module and show_offset are False: "start_thread" """ #addr is of type stacktrace_build_id #so invoke the bsym address resolver typeofaddr = str(type(addr)) if typeofaddr.find('bpf_stack_build_id') != -1: sym = bcc_symbol() b = bcc_stacktrace_build_id() b.status = addr.status b.build_id = addr.build_id b.u.offset = addr.offset; res = lib.bcc_buildsymcache_resolve(BPF._bsymcache, ct.byref(b), ct.byref(sym)) if res < 0: if sym.module and sym.offset: name,offset,module = (None, sym.offset, ct.cast(sym.module, ct.c_char_p).value) else: name, offset, module = (None, addr, None) else: name, offset, module = (sym.name, sym.offset, ct.cast(sym.module, ct.c_char_p).value) else: name, offset, module = BPF._sym_cache(pid).resolve(addr, demangle) offset = b"+0x%x" % offset if show_offset and name is not None else b"" name = name or b"[unknown]" name = name + offset module = b" [%s]" % os.path.basename(module) \ if show_module and module is not None else b"" return name + module @staticmethod def ksym(addr, show_module=False, show_offset=False): """ksym(addr) Translate a kernel memory address into a kernel function name, which is returned. When show_module is True, the module name ("kernel") is also included. 
When show_offset is true, the instruction offset as a hexadecimal number is also included in the string. Example output when both show_module and show_offset are True: "default_idle+0x0 [kernel]" """ return BPF.sym(addr, -1, show_module, show_offset, False) @staticmethod def ksymname(name): """ksymname(name) Translate a kernel name into an address. This is the reverse of ksym. Returns -1 when the function name is unknown.""" return BPF._sym_cache(-1).resolve_name(None, name) def num_open_kprobes(self): """num_open_kprobes() Get the number of open K[ret]probes. Can be useful for scenarios where event_re is used while attaching and detaching probes. """ return len(self.kprobe_fds) def num_open_uprobes(self): """num_open_uprobes() Get the number of open U[ret]probes. """ return len(self.uprobe_fds) def num_open_tracepoints(self): """num_open_tracepoints() Get the number of open tracepoints. """ return len(self.tracepoint_fds) def perf_buffer_poll(self, timeout = -1): """perf_buffer_poll(self) Poll from all open perf ring buffers, calling the callback that was provided when calling open_perf_buffer for each entry. """ readers = (ct.c_void_p * len(self.perf_buffers))() for i, v in enumerate(self.perf_buffers.values()): readers[i] = v lib.perf_reader_poll(len(readers), readers, timeout) def kprobe_poll(self, timeout = -1): """kprobe_poll(self) Deprecated. Use perf_buffer_poll instead. """ self.perf_buffer_poll(timeout) def free_bcc_memory(self): return lib.bcc_free_memory() @staticmethod def add_module(modname): """add_module(modname) Add a library or exe to buildsym cache """ try: lib.bcc_buildsymcache_add_module(BPF._bsymcache, modname.encode()) except Exception as e: print("Error adding module to build sym cache"+str(e)) def donothing(self): """the do nothing exit handler""" def cleanup(self): # Clean up opened probes for k, v in list(self.kprobe_fds.items()): self.detach_kprobe_event(k) for k, v in list(self.uprobe_fds.items()): self.detach_uprobe_event(k) for k, v in list(self.tracepoint_fds.items()): self.detach_tracepoint(k) for k, v in list(self.raw_tracepoint_fds.items()): self.detach_raw_tracepoint(k) # Clean up opened perf ring buffer and perf events table_keys = list(self.tables.keys()) for key in table_keys: if isinstance(self.tables[key], PerfEventArray): del self.tables[key] for (ev_type, ev_config) in list(self.open_perf_events.keys()): self.detach_perf_event(ev_type, ev_config) if self.tracefile: self.tracefile.close() self.tracefile = None if self.module: lib.bpf_module_destroy(self.module) self.module = None def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.cleanup() from .usdt import USDT, USDTException
[]
[]
[ "PATH" ]
[]
["PATH"]
python
1
0
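A minimal usage sketch for the tracepoint API documented in the docstrings above, assuming a Linux host with bcc installed and root privileges; the sched:sched_switch tracepoint and the printed message are only illustrative choices. Because the C handler is declared with TRACEPOINT_PROBE, its generated name starts with tracepoint__ and _trace_autoload attaches it when the BPF object is constructed.

from bcc import BPF

# Log context switches via bpf_trace_printk and read them back with trace_fields().
prog = r"""
TRACEPOINT_PROBE(sched, sched_switch) {
    bpf_trace_printk("switch %d -> %d\n", args->prev_pid, args->next_pid);
    return 0;
}
"""
b = BPF(text=prog)          # auto-attached by _trace_autoload (tracepoint__ prefix)
while True:
    task, pid, cpu, flags, ts, msg = b.trace_fields()
    print(ts, msg.decode(errors="replace"))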
pinot-plugins/pinot-file-system/pinot-gcs/src/test/java/org/apache/pinot/plugin/filesystem/TestGcsPinotFS.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.pinot.plugin.filesystem; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.io.Closer; import java.io.BufferedWriter; import java.io.IOException; import java.io.UncheckedIOException; import java.net.URI; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.stream.IntStream; import java.util.stream.Stream; import org.apache.pinot.spi.env.PinotConfiguration; import org.testng.SkipException; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import static java.lang.String.format; import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.UUID.randomUUID; import static java.util.stream.Collectors.toList; import static java.util.stream.Collectors.toSet; import static org.apache.commons.io.FileUtils.deleteDirectory; import static org.apache.pinot.plugin.filesystem.GcsPinotFS.GCP_KEY; import static org.apache.pinot.plugin.filesystem.GcsPinotFS.PROJECT_ID; import static org.apache.pinot.plugin.filesystem.GcsUri.createGcsUri; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; /** * Integration test for GcsPinotFS * * Credentials to connect to gcs must be supplied via environment variables. * The following environment variables are used to connect to gcs: * GOOGLE_APPLICATION_CREDENTIALS: path to gcp json key file * GCP_PROJECT: the name of the project to use * GCS_BUCKET: the name of the bucket to use * * The reason we do not use RemoteStorageHelper is that create bucket * permissions are required. Pinot only needs to test creating objects. * The bucket should already exist. * * If credentials are not supplied then all tests are skipped. 
*/ @Test(singleThreaded = true) public class TestGcsPinotFS { private static final String DATA_DIR_PREFIX = "testing-data"; private GcsPinotFS _pinotFS; private GcsUri _dataDir; private final Closer _closer = Closer.create(); @BeforeClass public void setup() { String keyFile = System.getenv("GOOGLE_APPLICATION_CREDENTIALS"); String projectId = System.getenv("GCP_PROJECT"); String bucket = System.getenv("GCS_BUCKET"); if (keyFile != null && projectId != null && bucket != null) { _pinotFS = new GcsPinotFS(); _pinotFS.init(new PinotConfiguration(ImmutableMap.<String, Object> builder() .put(PROJECT_ID,projectId) .put(GCP_KEY, keyFile) .build())); _dataDir = createGcsUri(bucket, DATA_DIR_PREFIX + randomUUID()); } } @AfterClass public void tearDown() throws Exception { if (_pinotFS != null) { _pinotFS.delete(_dataDir.getUri(), true); _closer.close(); } } private void skipIfNotConfigured() { if (_pinotFS == null) { throw new SkipException("No google credentials supplied."); } } private Path createLocalTempDirectory() throws IOException { Path temporaryDirectory = Files.createDirectory(Paths.get("/tmp/" + DATA_DIR_PREFIX + "-" + randomUUID())); _closer.register(() -> deleteDirectory(temporaryDirectory.toFile())); return temporaryDirectory; } private GcsUri createTempDirectoryGcsUri() { return _dataDir.resolve("dir-" + randomUUID()); } /** * Resolved gcs uri does not contain trailing delimiter, e.g. "/", * as the GcsUri.resolve() method uses Path.resolve() semantics. * * @param gcsUri * @return path with trailing delimiter */ private static GcsUri appendSlash(GcsUri gcsUri) { return createGcsUri(gcsUri.getBucketName(), gcsUri.getPrefix()); } private List<String> writeToFile(Path file, int count) { List<String> lines = IntStream.range(0, count) .mapToObj(n -> "line " + n) .collect(toList()); try (BufferedWriter writer = Files.newBufferedWriter(file, UTF_8)) { lines.forEach(line -> { try { writer.write(line); writer.newLine(); } catch (IOException e) { throw new UncheckedIOException(e); } }); } catch (IOException e) { throw new UncheckedIOException(e); } return lines; } private Stream<GcsUri> listFilesToStream(GcsUri gcsUri) throws IOException { return Arrays.asList(_pinotFS.listFiles(gcsUri.getUri(), true)).stream() .map(URI::create) .map(GcsUri::new); } @Test public void testGcs() throws Exception { skipIfNotConfigured(); // Create empty file Path localTmpDir = createLocalTempDirectory(); Path emptyFile = localTmpDir.resolve("empty"); emptyFile.toFile().createNewFile(); // Create non-empty file Path file1 = localTmpDir.resolve("file1"); List<String> expectedLinesFromFile =writeToFile(file1, 10); List<String> actualLinesFromFile = Files.readAllLines(file1, UTF_8); // Sanity check assertEquals(actualLinesFromFile, expectedLinesFromFile); // Gcs Temporary Directory GcsUri gcsDirectoryUri = createTempDirectoryGcsUri(); Set<GcsUri> expectedElements = new HashSet<>(); expectedElements.add(appendSlash(gcsDirectoryUri)); // Test mkdir() // Create the temp directory, which also creates any missing parent paths _pinotFS.mkdir(gcsDirectoryUri.getUri()); GcsUri emptyFileGcsUri = gcsDirectoryUri.resolve("empty"); expectedElements.add(emptyFileGcsUri); // Copy the empty file _pinotFS.copyFromLocalFile(emptyFile.toFile(), emptyFileGcsUri.getUri()); expectedElements.add(appendSlash(emptyFileGcsUri)); // Test making a subdirectory with the same name as an object. 
// This is allowed in gcs _pinotFS.mkdir(emptyFileGcsUri.getUri()); GcsUri nonEmptyFileGcsUri = gcsDirectoryUri.resolve("empty/file1"); expectedElements.add(nonEmptyFileGcsUri); // Copy the non empty file to the new folder _pinotFS.copyFromLocalFile(file1.toFile(), nonEmptyFileGcsUri.getUri()); // Test listFiles() // Check that all the files are there assertEquals(listFilesToStream(_dataDir).collect(toSet()), expectedElements); // Check that the non-empty file has the expected contents Path nonEmptyFileFromGcs = localTmpDir.resolve("nonEmptyFileFromGcs"); _pinotFS.copyToLocalFile(nonEmptyFileGcsUri.getUri(), nonEmptyFileFromGcs.toFile()); assertEquals(Files.readAllLines(nonEmptyFileFromGcs), expectedLinesFromFile); // Test gcs copy single file to file GcsUri nonEmptyFileGcsUriCopy = gcsDirectoryUri.resolve("empty/file2"); _pinotFS.copy(nonEmptyFileGcsUri.getUri(), nonEmptyFileGcsUriCopy.getUri()); assertTrue(listFilesToStream(gcsDirectoryUri).anyMatch(uri -> uri.equals(nonEmptyFileGcsUriCopy)), format("Cannot find file '%s'", nonEmptyFileGcsUriCopy)); // Test gcs delete single file _pinotFS.delete(nonEmptyFileGcsUriCopy.getUri(), false); assertTrue(listFilesToStream(gcsDirectoryUri).allMatch(uri -> !uri.equals(nonEmptyFileGcsUriCopy)), format("Unexpected: found file '%s'", nonEmptyFileGcsUriCopy)); // Test copy directory -> directory GcsUri gcsDirectoryUriCopy = createTempDirectoryGcsUri(); _pinotFS.copy(gcsDirectoryUri.getUri(), gcsDirectoryUriCopy.getUri()); Set<GcsUri> expectedElementsCopy = new HashSet<>(); String directoryName = Paths.get(gcsDirectoryUri.getPath()).getFileName().toString(); String directoryCopyName = Paths.get(gcsDirectoryUriCopy.getPath()).getFileName().toString(); for (GcsUri element : ImmutableList.copyOf(expectedElements)) { expectedElementsCopy.add(createGcsUri(element.getBucketName(), element.getPath().replace(directoryName, directoryCopyName))); } expectedElementsCopy.addAll(expectedElements); assertEquals(listFilesToStream(_dataDir).collect(toSet()), expectedElementsCopy); // Test delete directory _pinotFS.delete(gcsDirectoryUriCopy.getUri(), true); assertEquals(listFilesToStream(_dataDir).collect(toSet()), expectedElements); // Test move directory _pinotFS.move(gcsDirectoryUri.getUri(), gcsDirectoryUriCopy.getUri(), true); expectedElementsCopy.removeAll(expectedElements); assertEquals(listFilesToStream(_dataDir).collect(toSet()), expectedElementsCopy); // Test move file to different directory GcsUri movedFileGcsUri = gcsDirectoryUriCopy.resolve("empty/file1"); assertTrue(listFilesToStream(gcsDirectoryUri).allMatch(uri -> !uri.equals(nonEmptyFileGcsUri))); _pinotFS.move(movedFileGcsUri.getUri(), nonEmptyFileGcsUri.getUri(), false); assertTrue(listFilesToStream(gcsDirectoryUri).anyMatch(uri -> uri.equals(nonEmptyFileGcsUri))); } }
[ "\"GOOGLE_APPLICATION_CREDENTIALS\"", "\"GCP_PROJECT\"", "\"GCS_BUCKET\"" ]
[]
[ "GCS_BUCKET", "GCP_PROJECT", "GOOGLE_APPLICATION_CREDENTIALS" ]
[]
["GCS_BUCKET", "GCP_PROJECT", "GOOGLE_APPLICATION_CREDENTIALS"]
java
3
0
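A small sketch of the same skip-if-unconfigured pattern using Python's unittest, assuming only the three environment variable names taken from the test above; the test body is a placeholder.

import os
import unittest

REQUIRED = ("GOOGLE_APPLICATION_CREDENTIALS", "GCP_PROJECT", "GCS_BUCKET")
CONFIGURED = all(os.getenv(v) for v in REQUIRED)

class GcsSmokeTest(unittest.TestCase):
    @unittest.skipUnless(CONFIGURED, "No google credentials supplied.")
    def test_credentials_present(self):
        # Mirrors skipIfNotConfigured(): only runs when every variable is set.
        for v in REQUIRED:
            self.assertTrue(os.getenv(v))

if __name__ == "__main__":
    unittest.main()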
examples/moving_mnist/plot_log.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: lfurushchev <[email protected]>

import os
import click
import pandas as pd

if not os.getenv("DISPLAY", None):
    import matplotlib
    matplotlib.use("Agg")
import matplotlib.pyplot as plt


@click.command()
@click.argument("log_path")
@click.option("--out", type=str, default="plot.png")
def plot(log_path, out):
    df = pd.read_json(log_path)
    df.plot(x="iteration", y=["main/loss"])
    plt.savefig(out)
    plt.show()


if __name__ == '__main__':
    plot()
[]
[]
[ "DISPLAY" ]
[]
["DISPLAY"]
python
1
0
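A sketch of input the script above can consume, assuming the log is a JSON list of records carrying the "iteration" and "main/loss" keys it plots; the values and file name below are placeholders, and the trailing comment shows a typical headless invocation.

import json

# Produce a minimal log file in the shape pd.read_json() expects.
records = [{"iteration": i, "main/loss": 1.0 / i} for i in range(1, 11)]
with open("log", "w") as f:
    json.dump(records, f)

# Typical run (with DISPLAY unset the script falls back to the Agg backend
# and only writes the PNG):
#   python plot_log.py log --out plot.png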
test/io/pipe/test_classification.py
import unittest import os from fastNLP.io import DataBundle from fastNLP.io.pipe.classification import SSTPipe, SST2Pipe, IMDBPipe, YelpFullPipe, YelpPolarityPipe from fastNLP.io.pipe.classification import ChnSentiCorpPipe, THUCNewsPipe, WeiboSenti100kPipe @unittest.skipIf('TRAVIS' in os.environ, "Skip in travis") class TestClassificationPipe(unittest.TestCase): def test_process_from_file(self): for pipe in [YelpPolarityPipe, SST2Pipe, IMDBPipe, YelpFullPipe, SSTPipe]: with self.subTest(pipe=pipe): print(pipe) data_bundle = pipe(tokenizer='raw').process_from_file() print(data_bundle) class TestRunPipe(unittest.TestCase): def test_load(self): for pipe in [IMDBPipe]: data_bundle = pipe(tokenizer='raw').process_from_file('test/data_for_tests/io/imdb') print(data_bundle) @unittest.skipIf('TRAVIS' in os.environ, "Skip in travis") class TestCNClassificationPipe(unittest.TestCase): def test_process_from_file(self): for pipe in [ChnSentiCorpPipe]: with self.subTest(pipe=pipe): data_bundle = pipe(bigrams=True, trigrams=True).process_from_file() print(data_bundle) class TestRunClassificationPipe(unittest.TestCase): def test_process_from_file(self): data_set_dict = { 'yelp.p': ('test/data_for_tests/io/yelp_review_polarity', YelpPolarityPipe, (6, 6, 6), (1176, 2), False), 'yelp.f': ('test/data_for_tests/io/yelp_review_full', YelpFullPipe, (6, 6, 6), (1023, 5), False), 'sst-2': ('test/data_for_tests/io/SST-2', SST2Pipe, (5, 5, 5), (139, 2), True), 'sst': ('test/data_for_tests/io/SST', SSTPipe, (6, 354, 6), (232, 5), False), 'imdb': ('test/data_for_tests/io/imdb', IMDBPipe, (6, 6, 6), (1670, 2), False), 'ChnSentiCorp': ('test/data_for_tests/io/ChnSentiCorp', ChnSentiCorpPipe, (6, 6, 6), (529, 1296, 1483, 2), False), 'Chn-THUCNews': ('test/data_for_tests/io/THUCNews', THUCNewsPipe, (9, 9, 9), (1864, 9), False), 'Chn-WeiboSenti100k': ('test/data_for_tests/io/WeiboSenti100k', WeiboSenti100kPipe, (7, 6, 6), (452, 2), False), } for k, v in data_set_dict.items(): path, pipe, data_set, vocab, warns = v with self.subTest(pipe=pipe): if 'Chn' not in k: if warns: with self.assertWarns(Warning): data_bundle = pipe(tokenizer='raw').process_from_file(path) else: data_bundle = pipe(tokenizer='raw').process_from_file(path) else: data_bundle = pipe(bigrams=True, trigrams=True).process_from_file(path) self.assertTrue(isinstance(data_bundle, DataBundle)) self.assertEqual(len(data_set), data_bundle.num_dataset) for x, y in zip(data_set, data_bundle.iter_datasets()): name, dataset = y self.assertEqual(x, len(dataset)) self.assertEqual(len(vocab), data_bundle.num_vocab) for x, y in zip(vocab, data_bundle.iter_vocabs()): name, vocabs = y self.assertEqual(x, len(vocabs))
[]
[]
[]
[]
[]
python
0
0
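A stripped-down illustration of the CI guard used above, assuming nothing beyond the standard library; the test body is a placeholder for anything that needs locally prepared data.

import os
import unittest

@unittest.skipIf('TRAVIS' in os.environ, "Skip in travis")
class NeedsLocalDataTest(unittest.TestCase):
    def test_runs_only_outside_ci(self):
        # On Travis the TRAVIS variable is exported, so this whole class is skipped there.
        self.assertNotIn('TRAVIS', os.environ)

if __name__ == "__main__":
    unittest.main()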
src/main/java/finddigits/Solution.java
package finddigits;

import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.regex.*;

public class Solution {

    // Complete the findDigits function below.
    static int findDigits(int n) {
        List<Integer> list = new ArrayList<>();
        int numberToSelected = n;
        int count = 0;
        while (numberToSelected != 0) {
            list.add(numberToSelected % 10);
            numberToSelected = numberToSelected / 10;
        }
        for (Integer divider : list) {
            if (divider != 0 && n % divider == 0) {
                count++;
            }
        }
        return count;
    }

    private static final Scanner scanner = new Scanner(System.in);

    public static void main(String[] args) throws IOException {
        BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));

        int t = scanner.nextInt();
        scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");

        for (int tItr = 0; tItr < t; tItr++) {
            int n = scanner.nextInt();
            scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");

            int result = findDigits(n);

            bufferedWriter.write(String.valueOf(result));
            bufferedWriter.newLine();
        }

        bufferedWriter.close();
        scanner.close();
    }
}
[ "\"OUTPUT_PATH\"" ]
[]
[ "OUTPUT_PATH" ]
[]
["OUTPUT_PATH"]
java
1
0
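A worked restatement of findDigits in Python to make the digit-divisor rule concrete; the sample inputs are only illustrative, and the OUTPUT_PATH plumbing of the Java harness is omitted.

def find_digits(n):
    # Count the digits of n that are non-zero and divide n exactly.
    return sum(1 for d in str(n) if d != '0' and n % int(d) == 0)

assert find_digits(12) == 2     # both 1 and 2 divide 12
assert find_digits(1012) == 3   # the two 1s and the 2 divide 1012; the 0 is skipped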
alembic/env.py
from logging.config import fileConfig

from sqlalchemy import engine_from_config
from sqlalchemy import pool
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base

from alembic import context

import sys
import os

sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from anyway.core.database import Base

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
config.set_main_option('sqlalchemy.url', os.environ.get('DATABASE_URL'))

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = Base.metadata

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.


def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL and not an Engine,
    though an Engine is acceptable here as well. By skipping the Engine
    creation we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.

    """
    url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url,
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
    )

    with context.begin_transaction():
        context.run_migrations()


def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    connectable = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )

    with connectable.connect() as connection:
        context.configure(
            connection=connection, target_metadata=target_metadata
        )

        with context.begin_transaction():
            context.run_migrations()


if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
[]
[]
[ "DATABASE_URL" ]
[]
["DATABASE_URL"]
python
1
0
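A sketch of how this env.py is typically driven, assuming the standard alembic command-line interface; the connection string below is a placeholder.

import os
import subprocess

# env.py reads sqlalchemy.url from DATABASE_URL at run time, so the DSN is
# injected through the environment rather than hard-coded in alembic.ini.
env = dict(os.environ, DATABASE_URL="postgresql://user:secret@localhost:5432/anyway")
subprocess.run(["alembic", "upgrade", "head"], check=True, env=env)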
erniekit/utils/multi_process_eval.py
# *_*coding:utf-8 *_* """multi process to test""" from __future__ import unicode_literals import os import json import time import six import sys from io import open import math import time import subprocess import collections import logging import numpy as np from erniekit.utils.util_helper import convert_to_unicode class MultiProcessEval(object): """multi process test for classifiy tasks""" def __init__(self, output_path, eval_phase, dev_count, gpu_id): self.output_path = output_path self.eval_phase = eval_phase self.dev_count = dev_count self.gpu_id = gpu_id if not os.path.exists(self.output_path): os.makedirs(self.output_path) def write_result(self, eval_index, save_lists=None, name_list=None): """write result to hard disk""" outfile = self.output_path + "/" + self.eval_phase if len(eval_index) > 0: outfile_part = outfile + ".part" + str(self.gpu_id) writer = open(outfile_part, "w") write_content = "\t".join([str(i) for i in eval_index]) + "\n" writer.write(write_content) writer.close() if save_lists is not None and name_list is not None: #save_list_name = ["qids", "labels", "scores"] save_list_name = name_list for idx in range(len(save_list_name)): save_list = json.dumps(save_lists[idx]) savefile_part = outfile + "." + save_list_name[idx] + ".part." + str(self.gpu_id) list_writer = open(savefile_part, "w") list_writer.write(convert_to_unicode(save_list)) list_writer.close() tmp_writer = open(self.output_path + "/" + self.eval_phase + "_dec_finish." + str(self.gpu_id), "w") tmp_writer.close() def concat_result(self, num_eval_index, num_list=None, name_list=None): """read result from hard disk and concat them""" outfile = self.output_path + "/" + self.eval_phase eval_index_all = [0.0] * num_eval_index eval_list_all = collections.defaultdict(list) while True: ret = subprocess.check_output(['find', self.output_path, '-maxdepth', '1', '-name', self.eval_phase + '_dec_finish.*']) if six.PY3: ret = ret.decode() ret = ret.rstrip().split("\n") if len(ret) != self.dev_count: time.sleep(1) continue for dev_cnt in range(self.dev_count): if num_eval_index > 0: fin = open(outfile + ".part" + str(dev_cnt)) cur_eval_index_all = fin.readline().strip().split("\t") cur_eval_index_all = [float(i) for i in cur_eval_index_all] eval_index_all = list(map(lambda x: x[0] + x[1], zip(eval_index_all, cur_eval_index_all))) if num_list is not None and name_list is not None: #save_list_name = ["qids", "labels", "scores"] save_list_name = name_list for idx in range(len(save_list_name)): fin_list = open(outfile + "." + save_list_name[idx] + ".part." 
+ str(dev_cnt), "r") eval_list_all[save_list_name[idx]].extend(json.loads(fin_list.read())) os.system("rm " + outfile + ".*part*") os.system("rm " + self.output_path + "/" + self.eval_phase + "_dec_finish.*") break if num_list is not None: return eval_index_all, eval_list_all return eval_index_all class MultiProcessEvalForMrc(object): """multi process test for mrc tasks""" def __init__(self, output_path, eval_phase, dev_count, gpu_id, tokenizer): self.output_path = output_path self.eval_phase = eval_phase self.dev_count = dev_count self.gpu_id = gpu_id self.tokenizer = tokenizer if not os.path.exists(self.output_path): os.makedirs(self.output_path) if not os.path.exists("./output"): os.makedirs('./output') self.output_prediction_file = os.path.join('./output', self.eval_phase + "_predictions.json") self.output_nbest_file = os.path.join('./output', self.eval_phase + "_nbest_predictions.json") def write_result(self, all_results): """write result to hard disk""" outfile = self.output_path + "/" + self.eval_phase outfile_part = outfile + ".part" + str(self.gpu_id) writer = open(outfile_part, "w") save_dict = json.dumps(all_results, ensure_ascii=False) if isinstance(save_dict, bytes): save_dict = save_dict.decode("utf-8") writer.write(save_dict) writer.close() tmp_writer = open(self.output_path + "/" + self.eval_phase + "_dec_finish." + str(self.gpu_id), "w") tmp_writer.close() def concat_result(self, RawResult): """read result from hard disk and concat them""" outfile = self.output_path + "/" + self.eval_phase all_results_read = [] while True: ret = subprocess.check_output(['find', self.output_path, '-maxdepth', '1', '-name', self.eval_phase + '_dec_finish.*']) if six.PY3: ret = ret.decode() ret = ret.rstrip().split("\n") if len(ret) != self.dev_count: time.sleep(2) continue for dev_cnt in range(self.dev_count): fpath = outfile + ".part" + str(dev_cnt) fin_read = open(fpath, "rb") succeed = False i = 0 while not succeed: i + 1 try: cur_rawresult = json.loads(fin_read.read()) succeed = True except Exception as err: logging.warning(\ 'faild to parse content {} in file {}, error {}'\ .format(fin_read.read(), fpath, err)) time.sleep(2) if i > 5: break if not succeed: raise ValueError('faild to parse content {} in file {}'\ .format(fin_read.read(), fpath)) for tp in cur_rawresult: assert len(tp) == 3 all_results_read.append( RawResult( unique_id=tp[0], start_logits=tp[1], end_logits=tp[2])) os.system("rm " + outfile + ".*part*") os.system("rm " + self.output_path + "/" + self.eval_phase + "_dec_finish.*") break return all_results_read def write_predictions(self, all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file): """Write final predictions to the json file and log-odds of null if needed.""" logging.info("Writing predictions to: %s" % (output_prediction_file)) logging.info("Writing nbest to: %s" % (output_nbest_file)) example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", [ "feature_index", "start_index", "end_index", "start_logit", "end_logit" ]) all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = 
example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] start_indexes = self._get_best_indexes(result.start_logits, n_best_size) end_indexes = self._get_best_indexes(result.end_logits, n_best_size) for start_index in start_indexes: for end_index in end_indexes: # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. if start_index >= len(feature.tokens): continue if end_index >= len(feature.tokens): continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index])) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_logit", "end_logit"]) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index > 0: # this is a non-null prediction tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1 )] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] tok_text = " ".join(tok_tokens) # De-tokenize WordPieces that have been split off. tok_text = tok_text.replace(" ##", "") tok_text = tok_text.replace("##", "") # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = "".join(orig_tokens) final_text = self.get_final_text(tok_text, orig_text, do_lower_case) if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = "" seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. 
if not nbest: nbest.append( _NbestPrediction( text="empty", start_logit=0.0, end_logit=0.0)) total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) probs = self._compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_json.append(output) assert len(nbest_json) >= 1 all_predictions[example.qas_id] = nbest_json[0]["text"] all_nbest_json[example.qas_id] = nbest_json with open(output_prediction_file, "w", encoding='utf-8') as writer: writer.write(json.dumps(all_predictions, indent=4, ensure_ascii=False) + "\n") with open(output_nbest_file, "w", encoding='utf-8') as writer: writer.write(json.dumps(all_nbest_json, indent=4, ensure_ascii=False) + "\n") #return all_predictions def get_predictions(self, all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file): """get final predictions to the json file and log-odds of null if needed.""" example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", [ "feature_index", "start_index", "end_index", "start_logit", "end_logit" ]) all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] start_indexes = self._get_best_indexes(result.start_logits, n_best_size) end_indexes = self._get_best_indexes(result.end_logits, n_best_size) for start_index in start_indexes: for end_index in end_indexes: # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. 
if start_index >= len(feature.tokens): continue if end_index >= len(feature.tokens): continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index])) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_logit", "end_logit"]) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index > 0: # this is a non-null prediction tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1 )] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] tok_text = " ".join(tok_tokens) # De-tokenize WordPieces that have been split off. tok_text = tok_text.replace(" ##", "") tok_text = tok_text.replace("##", "") # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = "".join(orig_tokens) final_text = self.get_final_text(tok_text, orig_text, do_lower_case) if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = "" seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. if not nbest: nbest.append( _NbestPrediction( text="empty", start_logit=0.0, end_logit=0.0)) total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) probs = self._compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_json.append(output) assert len(nbest_json) >= 1 all_predictions[example.qas_id] = nbest_json[0]["text"] all_nbest_json[example.qas_id] = nbest_json return all_predictions, all_nbest_json def get_final_text(self, pred_text, orig_text, do_lower_case): """Project the tokenized prediction back to the original text.""" # When we created the data, we kept track of the alignment between original # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So # now `orig_text` contains the span of our original text corresponding to the # span that we predicted. # # However, `orig_text` may contain extra characters that we don't want in # our prediction. # # For example, let's say: # pred_text = steve smith # orig_text = Steve Smith's # # We don't want to return `orig_text` because it contains the extra "'s". 
# # We don't want to return `pred_text` because it's already been normalized # (the SQuAD eval script also does punctuation stripping/lower casing but # our tokenizer does additional normalization like stripping accent # characters). # # What we really want to return is "Steve Smith". # # Therefore, we have to apply a semi-complicated alignment heruistic between # `pred_text` and `orig_text` to get a character-to-charcter alignment. This # can fail in certain cases in which case we just return `orig_text`. def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(text): if c == " ": continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = "".join(ns_chars) return (ns_text, ns_to_s_map) # We first tokenize `orig_text`, strip whitespace from the result # and `pred_text`, and check if they are the same length. If they are # NOT the same length, the heuristic has failed. If they are the same # length, we assume the characters are one-to-one aligned. tok_text = " ".join(self.tokenizer.tokenize(orig_text)) start_position = tok_text.find(pred_text) if start_position == -1: return orig_text end_position = start_position + len(pred_text) - 1 (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) if len(orig_ns_text) != len(tok_ns_text): return orig_text # We then project the characters in `pred_text` back to `orig_text` using # the character-to-character alignment. tok_s_to_ns_map = {} for (i, tok_index) in six.iteritems(tok_ns_to_s_map): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: return orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: return orig_text output_text = orig_text[orig_start_position:(orig_end_position + 1)] return output_text def _get_best_indexes(self, logits, n_best_size): """Get the n-best logits from a list.""" index_and_score = sorted( enumerate(logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) return best_indexes def _compute_softmax(self, scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs class MultiNodeWriter(object): """multi process test for classifiy tasks""" def __init__(self, output_path, dev_count, gpu_id, use_multi_node=False): self.current_eval_phase = None self.current_writer = None self.current_outfile = None self.output_path = output_path self.dev_count = dev_count self.gpu_id = gpu_id self.node_suffix = "" self.use_multi_node = use_multi_node self.unused = False node_nums = int(os.getenv("PADDLE_NODES_NUM", "1")) dev_per_node = self.dev_count // node_nums self.node_id = self.gpu_id // dev_per_node if node_nums != 1: self.dev_count = dev_per_node self.gpu_id = self.gpu_id % 
dev_per_node if use_multi_node: self.node_suffix = ".node" + str(self.node_id) elif self.node_id != 0: self.unused = True if not os.path.exists(self.output_path): os.makedirs(self.output_path) def init_writer(self, eval_phase): """open output file""" if self.unused: return self.current_eval_phase = eval_phase self.current_outfile = self.output_path + "/" + self.current_eval_phase + self.node_suffix self.current_writer = open(self.current_outfile + ".part" + str(self.gpu_id), "w", encoding="utf8") def write_result_list(self, result_list): """write batch result""" if self.unused: return assert self.current_writer, "Writer not init before write" for result in result_list: if six.PY2: write_content = [str(i) if not isinstance(i, unicode) else i for i in result] else: write_content = [str(i) for i in result] self.current_writer.write("\t".join(write_content) + "\n") def finalize_writer(self, key_num=2, sort_key_index=1, remove_sort_key=True): """merge result""" if self.unused: return assert self.current_writer, "Writer not init before finalize" self.current_writer.close() tmp_file_prefix = self.current_eval_phase + "_dec_finish." tmp_writer = open(self.output_path + "/" + tmp_file_prefix + str(self.gpu_id), "w") tmp_writer.close() outfile = None if self.gpu_id == 0: while True: ret = subprocess.check_output(['find', self.output_path, '-maxdepth', '1', '-name', tmp_file_prefix + '*']) if six.PY3: ret = ret.decode() ret = ret.rstrip().split("\n") if len(ret) != self.dev_count: time.sleep(1) continue outfile = self.current_outfile merge_str = "".join(["$" + str(index) for index in range(1, key_num + 1) if not (remove_sort_key and index == sort_key_index)]) sort_cmd = ["sort", outfile + ".part*", "-n", "-t", "$'\\t'", "-k", str(sort_key_index)] awk_cmd = ["awk", "-F", '"\\t"', "'{print " + merge_str + "}'", ">" + outfile] merge_cmd = sort_cmd + ["|"] + awk_cmd rm_cmd = ["rm", outfile + ".part*", self.output_path + "/" + tmp_file_prefix + "*"] os.system(" ".join(merge_cmd)) os.system(" ".join(rm_cmd)) break self.current_writer = None self.current_eval_phase = None self.current_outfile = None return outfile class MultiProcessEvalForErnieDoc(object): """multi process test for mrc tasks""" def __init__(self, output_path, eval_phase, dev_count, gpu_id): self.output_path = output_path self.eval_phase = eval_phase self.dev_count = dev_count self.gpu_id = gpu_id if not os.path.exists(self.output_path): os.makedirs(self.output_path) if not os.path.exists("./output"): os.makedirs('./output') self.output_prediction_file = os.path.join('./output', self.eval_phase + "_predictions.json") self.output_nbest_file = os.path.join('./output', self.eval_phase + "_nbest_predictions.json") def write_result(self, all_results): """write result to hard disk""" outfile = self.output_path + "/" + self.eval_phase outfile_part = outfile + ".part" + str(self.gpu_id) writer = open(outfile_part, "w") save_dict = json.dumps(all_results, ensure_ascii=False) if isinstance(save_dict, bytes): save_dict = save_dict.decode("utf-8") writer.write(save_dict) writer.close() tmp_writer = open(self.output_path + "/" + self.eval_phase + "_dec_finish." 
+ str(self.gpu_id), "w") tmp_writer.close() def concat_result(self, RawResult): """read result from hard disk and concat them""" outfile = self.output_path + "/" + self.eval_phase all_results_read = [] while True: ret = subprocess.check_output(['find', self.output_path, '-maxdepth', '1', '-name', self.eval_phase + '_dec_finish.*']) #_, ret = commands.getstatusoutput('find ' + self.output_path + \ # ' -maxdepth 1 -name ' + self.eval_phase + '"_dec_finish.*"') if six.PY3: ret = ret.decode() ret = ret.rstrip().split("\n") if len(ret) != self.dev_count: time.sleep(1) continue for dev_cnt in range(self.dev_count): fin_read = open(outfile + ".part" + str(dev_cnt), "rb") cur_rawresult = json.loads(fin_read.read()) for tp in cur_rawresult: assert len(tp) == 3 all_results_read.append( RawResult( unique_id=tp[0], prob=tp[1], label=tp[2])) #subprocess.check_output(["rm ", outfile + ".part*"]) #subprocess.check_output(["rm ", self.output_path + "/" + self.eval_phase + "_dec_finish.*"]) #commands.getstatusoutput("rm " + outfile + ".part*") #commands.getstatusoutput("rm " + self.output_path + "/" + self.eval_phase + "_dec_finish.*") os.system("rm " + outfile + "*.part*") os.system("rm " + self.output_path + "/" + self.eval_phase + "_dec_finish.*") break return all_results_read def write_predictions(self, all_results): """Write final predictions to the json file and log-odds of null if needed.""" unique_id_to_result = collections.defaultdict(list) for result in all_results: unique_id_to_result[result.unique_id].append(result) print("data num: %d" % (len(unique_id_to_result))) all_probs = [] all_labels = [] for key, value in unique_id_to_result.items(): prob_for_one_sample = [result.prob for result in value] label_for_one_sample = [result.label for result in value] assert len(set(label_for_one_sample)) == 1 prob_emb = np.sum(np.array(prob_for_one_sample), axis=0).tolist() all_probs.append(prob_emb) all_labels.append(list(set(label_for_one_sample))) assert len(all_labels) == len(all_probs) all_labels = np.array(all_labels).astype("float32") all_probs = np.array(all_probs).astype("float32") return len(unique_id_to_result), all_labels, all_probs
[]
[]
[ "PADDLE_NODES_NUM" ]
[]
["PADDLE_NODES_NUM"]
python
1
0
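The node bookkeeping in MultiNodeWriter reduces to a few integer operations; the sketch below repeats them standalone with placeholder device counts so the mapping is easy to check.

import os

dev_count, gpu_id = 8, 5                          # placeholder: 8 GPUs total, this rank is 5
node_nums = int(os.getenv("PADDLE_NODES_NUM", "1"))
dev_per_node = dev_count // node_nums
node_id = gpu_id // dev_per_node
local_gpu_id = gpu_id % dev_per_node
# With PADDLE_NODES_NUM=2: dev_per_node=4, node_id=1, local_gpu_id=1;
# left unset, everything stays on node 0 and local_gpu_id == gpu_id.
print(node_id, local_gpu_id)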
tun/tun_darwin.go
/* SPDX-License-Identifier: MIT * * Copyright (C) 2017-2019 WireGuard LLC. All Rights Reserved. */ package tun import ( "fmt" "io/ioutil" "net" "os" "syscall" "unsafe" "golang.org/x/net/ipv6" "golang.org/x/sys/unix" ) const utunControlName = "com.apple.net.utun_control" // _CTLIOCGINFO value derived from /usr/include/sys/{kern_control,ioccom}.h const _CTLIOCGINFO = (0x40000000 | 0x80000000) | ((100 & 0x1fff) << 16) | uint32(byte('N'))<<8 | 3 // sockaddr_ctl specifeid in /usr/include/sys/kern_control.h type sockaddrCtl struct { scLen uint8 scFamily uint8 ssSysaddr uint16 scID uint32 scUnit uint32 scReserved [5]uint32 } type NativeTun struct { name string tunFile *os.File events chan TUNEvent errors chan error routeSocket int } var sockaddrCtlSize uintptr = 32 func (tun *NativeTun) routineRouteListener(tunIfindex int) { var ( statusUp bool statusMTU int ) defer func() { close(tun.events) tun.routeSocket = -1 }() data := make([]byte, os.Getpagesize()) for { retry: n, err := unix.Read(tun.routeSocket, data) if err != nil { if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINTR { goto retry } tun.errors <- err return } if n < 14 { continue } if data[3 /* type */] != unix.RTM_IFINFO { continue } ifindex := int(*(*uint16)(unsafe.Pointer(&data[12 /* ifindex */]))) if ifindex != tunIfindex { continue } iface, err := net.InterfaceByIndex(ifindex) if err != nil { tun.errors <- err return } // Up / Down event up := (iface.Flags & net.FlagUp) != 0 if up != statusUp && up { tun.events <- TUNEventUp } if up != statusUp && !up { tun.events <- TUNEventDown } statusUp = up // MTU changes if iface.MTU != statusMTU { tun.events <- TUNEventMTUUpdate } statusMTU = iface.MTU } } func CreateTUN(name string, mtu int) (TUNDevice, error) { ifIndex := -1 if name != "utun" { _, err := fmt.Sscanf(name, "utun%d", &ifIndex) if err != nil || ifIndex < 0 { return nil, fmt.Errorf("Interface name must be utun[0-9]*") } } fd, err := unix.Socket(unix.AF_SYSTEM, unix.SOCK_DGRAM, 2) if err != nil { return nil, err } var ctlInfo = &struct { ctlID uint32 ctlName [96]byte }{} copy(ctlInfo.ctlName[:], []byte(utunControlName)) _, _, errno := unix.Syscall( unix.SYS_IOCTL, uintptr(fd), uintptr(_CTLIOCGINFO), uintptr(unsafe.Pointer(ctlInfo)), ) if errno != 0 { return nil, fmt.Errorf("_CTLIOCGINFO: %v", errno) } sc := sockaddrCtl{ scLen: uint8(sockaddrCtlSize), scFamily: unix.AF_SYSTEM, ssSysaddr: 2, scID: ctlInfo.ctlID, scUnit: uint32(ifIndex) + 1, } scPointer := unsafe.Pointer(&sc) _, _, errno = unix.RawSyscall( unix.SYS_CONNECT, uintptr(fd), uintptr(scPointer), uintptr(sockaddrCtlSize), ) if errno != 0 { return nil, fmt.Errorf("SYS_CONNECT: %v", errno) } err = syscall.SetNonblock(fd, true) if err != nil { return nil, err } tun, err := CreateTUNFromFile(os.NewFile(uintptr(fd), ""), mtu) if err == nil && name == "utun" { fname := os.Getenv("WG_TUN_NAME_FILE") if fname != "" { ioutil.WriteFile(fname, []byte(tun.(*NativeTun).name+"\n"), 0400) } } return tun, err } func CreateTUNFromFile(file *os.File, mtu int) (TUNDevice, error) { tun := &NativeTun{ tunFile: file, events: make(chan TUNEvent, 10), errors: make(chan error, 5), } name, err := tun.Name() if err != nil { tun.tunFile.Close() return nil, err } tunIfindex, err := func() (int, error) { iface, err := net.InterfaceByName(name) if err != nil { return -1, err } return iface.Index, nil }() if err != nil { tun.tunFile.Close() return nil, err } tun.routeSocket, err = unix.Socket(unix.AF_ROUTE, unix.SOCK_RAW, unix.AF_UNSPEC) if err != nil { tun.tunFile.Close() return nil, err } 
go tun.routineRouteListener(tunIfindex) if mtu > 0 { err = tun.setMTU(mtu) if err != nil { tun.Close() return nil, err } } return tun, nil } func (tun *NativeTun) Name() (string, error) { var ifName struct { name [16]byte } ifNameSize := uintptr(16) var errno syscall.Errno tun.operateOnFd(func(fd uintptr) { _, _, errno = unix.Syscall6( unix.SYS_GETSOCKOPT, fd, 2, /* #define SYSPROTO_CONTROL 2 */ 2, /* #define UTUN_OPT_IFNAME 2 */ uintptr(unsafe.Pointer(&ifName)), uintptr(unsafe.Pointer(&ifNameSize)), 0) }) if errno != 0 { return "", fmt.Errorf("SYS_GETSOCKOPT: %v", errno) } tun.name = string(ifName.name[:ifNameSize-1]) return tun.name, nil } func (tun *NativeTun) File() *os.File { return tun.tunFile } func (tun *NativeTun) Events() chan TUNEvent { return tun.events } func (tun *NativeTun) Read(buff []byte, offset int) (int, error) { select { case err := <-tun.errors: return 0, err default: buff := buff[offset-4:] n, err := tun.tunFile.Read(buff[:]) if n < 4 { return 0, err } return n - 4, err } } func (tun *NativeTun) Write(buff []byte, offset int) (int, error) { // reserve space for header buff = buff[offset-4:] // add packet information header buff[0] = 0x00 buff[1] = 0x00 buff[2] = 0x00 if buff[4]>>4 == ipv6.Version { buff[3] = unix.AF_INET6 } else { buff[3] = unix.AF_INET } // write return tun.tunFile.Write(buff) } func (tun *NativeTun) Flush() error { //TODO: can flushing be implemented by buffering and using sendmmsg? return nil } func (tun *NativeTun) Close() error { var err2 error err1 := tun.tunFile.Close() if tun.routeSocket != -1 { unix.Shutdown(tun.routeSocket, unix.SHUT_RDWR) err2 = unix.Close(tun.routeSocket) } else if tun.events != nil { close(tun.events) } if err1 != nil { return err1 } return err2 } func (tun *NativeTun) setMTU(n int) error { // open datagram socket var fd int fd, err := unix.Socket( unix.AF_INET, unix.SOCK_DGRAM, 0, ) if err != nil { return err } defer unix.Close(fd) // do ioctl call var ifr [32]byte copy(ifr[:], tun.name) *(*uint32)(unsafe.Pointer(&ifr[unix.IFNAMSIZ])) = uint32(n) _, _, errno := unix.Syscall( unix.SYS_IOCTL, uintptr(fd), uintptr(unix.SIOCSIFMTU), uintptr(unsafe.Pointer(&ifr[0])), ) if errno != 0 { return fmt.Errorf("failed to set MTU on %s", tun.name) } return nil } func (tun *NativeTun) MTU() (int, error) { // open datagram socket fd, err := unix.Socket( unix.AF_INET, unix.SOCK_DGRAM, 0, ) if err != nil { return 0, err } defer unix.Close(fd) // do ioctl call var ifr [64]byte copy(ifr[:], tun.name) _, _, errno := unix.Syscall( unix.SYS_IOCTL, uintptr(fd), uintptr(unix.SIOCGIFMTU), uintptr(unsafe.Pointer(&ifr[0])), ) if errno != 0 { return 0, fmt.Errorf("failed to get MTU on %s", tun.name) } return int(*(*int32)(unsafe.Pointer(&ifr[16]))), nil }
[ "\"WG_TUN_NAME_FILE\"" ]
[]
[ "WG_TUN_NAME_FILE" ]
[]
["WG_TUN_NAME_FILE"]
go
1
0
TriggerEfficiency/TrigEffAnalysis_MC2018.py
import os

from efficiency_analyzer import EfficiencyAnalyzer

cuts = [
    'Vtype == 3 && Electron_pt[0]>35 && nJet >= 2 && Sum$(Jet_Pt > 20 && abs(Jet_eta) < 2.5 && Jet_lepFilter) >= 2 && HLT_Ele32_WPTight_Gsf && abs(TVector2::Phi_mpi_pi(Electron_phi[0] - MET_phi)) < 2.5 && Flag_goodVertices && Flag_globalSuperTightHalo2016Filter && Flag_HBHENoiseFilter && Flag_HBHENoiseIsoFilter && Flag_EcalDeadCellTriggerPrimitiveFilter && Flag_BadPFMuonFilter && Flag_BadChargedCandidateFilter && Flag_ecalBadCalibFilter',
]

histograms = [
    ('RawMET', 'RawMET_pt', 50, 0, 500),
    ('MET', 'MET_pt', 50, 0, 500),
    ('TkMET', 'TkMET_pt', 50, 0, 500),
    ('PuppiMET', 'PuppiMET_pt', 50, 0, 500),
    ('MHT', 'MHT_pt', 50, 0, 500),
    ('minMETMHT', 'min(MET_pt, MHT_pt)', 50, 0, 500),
    ('Vtype', 'Vtype', 7, -1, 6),
    ('Jet1Pt', 'Jet_Pt[0]', 50, 0, 500),
    ('Jet2Pt', 'Jet_Pt[1]', 50, 0, 500),
    ('Jet1Eta', 'Jet_eta[0]', 100, -5, 5),
    ('Jet2Eta', 'Jet_eta[1]', 100, -5, 5),
    ('Jet1CMVA', 'Jet_btagCMVA[0]', 100, -1, 1),
    ('Jet2CMVA', 'Jet_btagCMVA[1]', 100, -1, 1),
    ('absDeltaPhiJetMET', 'MinIf$(abs(TVector2::Phi_mpi_pi(Jet_phi - MET_phi)), Jet_pt > 30 && Jet_puId > 0)', 32, 0, 3.2),
]

analyzer = EfficiencyAnalyzer()
analyzer.submit(
    name='WJetsToLNu_HT1200to2500',
    #name='W3JetsToLNu_store_trees_full',
    #src='/W3JetsToLNu_TuneCP5_13TeV-madgraphMLM-pythia8/scoopers-RunIISummer17MiniAOD-92X-VHbbPostNano2017_V1-e273b12d9f89d622a34e4bc98b05ee29/USER',
    src='/WJetsToLNu_HT-1200To2500_TuneCP5_13TeV-madgraphMLM-pythia8/acalandr-RunIIAutumn18NanoAOD-102X_upgr82-82f9a1e3d3dcf76bf6a4a44034cf6840/USER',
    dbs_instance='phys03',
    cuts=cuts,
    histograms=histograms,
    commands={
        'x509userproxy': os.environ['X509_USER_PROXY']
    },
    no_submit=False
)
[]
[]
[ "X509_USER_PROXY" ]
[]
["X509_USER_PROXY"]
python
1
0
examples/plot_obs_summary.py
#!/usr/bin/env python

import numpy as np

import matplotlib as mpl
mpl.rcParams.update({'font.size': 18})
mpl.rcParams.update({'savefig.dpi': 200})
mpl.rcParams.update({'savefig.bbox': 'tight'})
import matplotlib.pyplot as plt

# from astropy.utils.data import download_file
# from astropy.utils import iers
# iers.IERS.iers_table = iers.IERS_A.open(download_file(iers.IERS_A_URL, cache=True))

import astropy.units as u
import astropy.coordinates
import astropy.time

import bossdata

import os
import pydl


# helper function for shifting angles to desired range
def normalize_angle(angle):
    while angle <= -180:
        angle += 360
    while angle > 180:
        angle -= 360
    return angle


def main():
    platelist = bossdata.meta.Database(platelist=True)

    # Select columns from "good" plates
    good_plates = platelist.select_all(what='PLATE,MJD,MAPNAME,RACEN,DECCEN,SEEING50,RMSOFF50,NGUIDE,TAI_BEG,AIRTEMP', where='PLATEQUALITY="good"')

    # Some observations near the beginning of the survey do not have guide cam data
    num_missing_guide_data = np.sum(good_plates['SEEING50'] == 0)
    print 'Number of plates with SEEING50=0: %d' % num_missing_guide_data

    # Plot seeing distribution
    plt.figure(figsize=(8,6))
    plt.hist(good_plates['SEEING50'], bins=np.linspace(0.96, 2.6, 42), alpha=.5, histtype='stepfilled')
    plt.xlim(0.96, 2.6)
    plt.ylim(0, 200)
    plt.ylabel('Observations')
    plt.xlabel('PSF FWHM (arcseconds)')
    plt.grid(True)
    plt.savefig('psf-dist.pdf')

    # Some observations near the beginning of the survey do not have guide cam data
    num_missing_rms = np.sum((good_plates['RMSOFF50'] == 0) & (good_plates['NGUIDE'] == 0))
    print 'Number of plates with RMSOFF50=0: %d' % num_missing_rms

    # Plot seeing distribution
    plt.figure(figsize=(8,6))
    plt.hist(good_plates['RMSOFF50']*np.sqrt(good_plates['NGUIDE']), bins=np.linspace(0, 0.2, 101), alpha=.5, histtype='stepfilled')
    plt.xlim(0, 0.2)
    # plt.ylim(0, 200)
    plt.ylabel('Observations')
    plt.xlabel('RMSOFF50 (arcseconds)')
    plt.grid(True)
    plt.savefig('rms-dist.pdf')

    # It seems strange that so many plates have airtemp == 0, not sure why
    num_airtemp_zero = np.sum(good_plates['AIRTEMP'] == 0)
    print 'Number of plates with AIRTEMP=0: %d' % num_airtemp_zero

    # Plot airtemp distribution
    plt.figure(figsize=(8,6))
    plt.hist(good_plates['AIRTEMP'], bins=np.linspace(-15, 25, 80+1), alpha=.5, histtype='stepfilled')
    plt.xlim(-15, 25)
    plt.ylabel('Observations')
    plt.xlabel('AIRTEMP (Degree Celsius)')
    plt.grid(True)
    plt.savefig('airtemp-dist.pdf')

    # observatory
    apo = astropy.coordinates.EarthLocation.of_site('apo')

    tai_mid = good_plates['TAI_BEG']
    plate_centers = good_plates['RACEN']*u.deg
    when = astropy.time.Time(tai_mid/86400., format='mjd', scale='tai', location=apo)
    ha_array = np.array(map(normalize_angle, ((when.sidereal_time('apparent') - plate_centers).to(u.deg).value)))

    # finder = bossdata.path.Finder()
    # mirror = bossdata.remote.Manager()

    # speclog_path = os.getenv('BOSS_SPECLOG')
    # design_ha_array = []
    # for i,obs in enumerate(good_plates):
    #     plate,mjd = obs['PLATE'], obs['MJD']
    #     if i and (i % 25) == 0:
    #         print plate, mjd
    #     obs_mjd = '{:d}'.format(mjd)
    #     plug_map_name = 'plPlugMapM-{}.par'.format(obs['MAPNAME'])
    #     plug_map_path = os.path.join(speclog_path, obs_mjd, plug_map_name)
    #     try:
    #         plug_map = pydl.pydlutils.yanny.yanny(plug_map_path, np=True)
    #         # Get the list of exposures used in this observation's coadd from a spec lite file.
    #         # spec_name = finder.get_spec_path(plate, mjd, fiber=1, lite=True)
    #         # spec_file = bossdata.spec.SpecFile(mirror.get(spec_name))
    #         # Read the first b1 raw science exposure to find this plate's plug map.
    #         # raw = spec_file.get_raw_image(0, 'blue', finder=finder, mirror=mirror)
    #         # plug_map = raw.read_plug_map()
    #         # Look up the plate design pointing from the raw header and convert to
    #         # an index A,B,C,... -> 0,1,2,...
    #         # pointing_label = raw.header['POINTING'].strip()
    #         # pointing_index = ord(pointing_label) - ord('A')
    #         pointing_index = 0
    #         # Initialize a pointing object for this plate's sky location.
    #         ra_center = float(plug_map['raCen']) * u.deg
    #         dec_center = float(plug_map['decCen']) * u.deg
    #         # print 'Plate center is RA={:.3f}, DEC={:.3f} for {}-{}'.format(ra_center, dec_center, plate, pointing_label)
    #         #pointing = tpcorr.pointing.Pointing(ra_center, dec_center)
    #         design_ha = float(plug_map['ha'].split()[pointing_index])
    #         design_ha_array.append(design_ha)
    #     except KeyError, e:
    #         print e
    #         print plate, mjd, plug_map_path
    # design_ha_array = np.array(design_ha_array)

    # plt.figure(figsize=(8,6))
    # plt.hist(ha_array-design_ha_array, bins=np.linspace(-45,45,91), alpha=0.5, histtype='stepfilled')
    # plt.xlabel(r'$h_{obs} - h_{design}$ (degrees)')
    # plt.ylabel('Observations')
    # plt.grid(True)
    # plt.savefig('dha-dist2.pdf')


if __name__ == '__main__':
    main()
[]
[]
[ "BOSS_SPECLOG" ]
[]
["BOSS_SPECLOG"]
python
1
0
opentimelineio_contrib/adapters/maya_sequencer.py
#
# Copyright 2017 Pixar Animation Studios
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
#    names, trademarks, service marks, or product names of the Licensor
#    and its affiliates, except as required to comply with Section 4(c) of
#    the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#

"""Maya Sequencer Adapter Harness"""

import os
import subprocess

from .. import adapters


def write_to_file(input_otio, filepath):
    if "OTIO_MAYA_PYTHON_BIN" not in os.environ:
        raise RuntimeError(
            "'OTIO_MAYA_PYTHON_BIN' not set, please set this to path to "
            "mayapy within the Maya installation."
        )

    maya_python_path = os.environ["OTIO_MAYA_PYTHON_BIN"]
    if not os.path.exists(maya_python_path):
        raise RuntimeError(
            'Cannot access file at OTIO_MAYA_PYTHON_BIN: "{}"'.format(
                maya_python_path
            )
        )
    if os.path.isdir(maya_python_path):
        raise RuntimeError(
            "OTIO_MAYA_PYTHON_BIN contains a path to a directory, not to an "
            "executable file: {}".format(maya_python_path)
        )

    input_data = adapters.write_to_string(input_otio, "otio_json")

    os.environ['PYTHONPATH'] = (
        os.pathsep.join(
            [
                os.environ.setdefault('PYTHONPATH', ''),
                os.path.dirname(__file__)
            ]
        )
    )

    proc = subprocess.Popen(
        [
            os.environ["OTIO_MAYA_PYTHON_BIN"],
            '-m',
            'extern_maya_sequencer',
            'write',
            filepath
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdin=subprocess.PIPE,
        env=os.environ
    )
    proc.stdin.write(input_data)
    out, err = proc.communicate()

    if proc.returncode:
        raise RuntimeError(
            "ERROR: extern_maya_sequencer (called through the maya sequencer "
            "file adapter) failed. stderr output: " + err
        )


def read_from_file(filepath):
    if "OTIO_MAYA_PYTHON_BIN" not in os.environ:
        raise RuntimeError(
            "'OTIO_MAYA_PYTHON_BIN' not set, please set this to path to "
            "mayapy within the Maya installation."
        )

    os.environ['PYTHONPATH'] = (
        os.pathsep.join(
            [
                os.environ.setdefault('PYTHONPATH', ''),
                os.path.dirname(__file__)
            ]
        )
    )

    proc = subprocess.Popen(
        [
            os.environ["OTIO_MAYA_PYTHON_BIN"],
            '-m',
            'extern_maya_sequencer',
            'read',
            filepath
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdin=subprocess.PIPE,
        env=os.environ
    )
    out, err = proc.communicate()

    # maya probably puts a bunch of crap on the stdout
    sentinel_str = "OTIO_JSON_BEGIN\n"
    end_sentinel_str = "\nOTIO_JSON_END\n"
    start = out.find(sentinel_str)
    end = out.find(end_sentinel_str)
    result = adapters.read_from_string(
        out[start + len(sentinel_str):end],
        "otio_json"
    )

    if proc.returncode:
        raise RuntimeError(
            "ERROR: extern_maya_sequencer (called through the maya sequencer "
            "file adapter) failed. stderr output: " + err
        )
    return result
[]
[]
[ "OTIO_MAYA_PYTHON_BIN", "PYTHONPATH" ]
[]
["OTIO_MAYA_PYTHON_BIN", "PYTHONPATH"]
python
2
0