Column schema (name / dtype / value range):

repo_name      stringlengths  5 - 92
path           stringlengths  4 - 232
copies         stringclasses  19 values
size           stringlengths  4 - 7
content        stringlengths  721 - 1.04M
license        stringclasses  15 values
hash           int64          -9,223,277,421,539,062,000 - 9,223,102,107B
line_mean      float64        6.51 - 99.9
line_max       int64          15 - 997
alpha_frac     float64        0.25 - 0.97
autogenerated  bool           1 class
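Before the rows, a minimal sketch of how a dump with this schema might be loaded and filtered. It assumes the rows are available as a Parquet file readable with pandas; the file name below is a hypothetical placeholder, not something taken from this dump.

import pandas as pd

# Hypothetical shard name; point this at the actual file behind the dump.
df = pd.read_parquet("code_dataset_shard.parquet")

# Columns per the schema above: repo_name, path, copies, size, content,
# license, hash, line_mean, line_max, alpha_frac, autogenerated.
print(df["license"].value_counts())   # distribution over the 15 license classes
print(df["line_max"].describe())      # int64 column, 15 - 997 per the schema

# Keep files that are not autogenerated and have reasonably short lines.
subset = df[(~df["autogenerated"]) & (df["line_max"] <= 100)]
for _, row in subset.head(3).iterrows():
    print(row["repo_name"], row["path"], len(row["content"]))  # content length in characters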
repo_name: Mohamad1994HD/LinkArchiever
path: app/models/linkList.py
copies: 1
size: 1189
content:
from sets import Set
from interfaceDB import insert_link_with_tag, is_link, is_tag, get_tags_ids_of_link, get_tags_from_ids, \
    get_links_ids_from_tag, get_link_data_from_id


class LinkList(list):
    def __init__(self, link_name, link_desc=None, link_tags=[]):
        list.__init__([])
        self.name = link_name
        self.desc = link_desc
        self.extend(link_tags)

    def save_to_db(self):
        is_existed = is_link(self.name)
        for tag in self:
            insert_link_with_tag(self.name, tag, existed_link=is_existed, existed_tag=is_tag(tag_name=tag))

    def get_tags_from_db(self):
        del self[:]
        self.extend(get_tags_from_ids(get_tags_ids_of_link(self.name)))
        return self

    def __repr__(self):
        return str(self.repr())

    def repr(self):
        return {'name': self.name, 'desc': self.desc, 'tags': [i for i in self]}


def get_links_ids_from_tags_lst(tags):
    l = []
    for tag in tags:
        l.extend(get_links_ids_from_tag(tag))
    my_set = Set(l)
    return list(my_set)


def get_links_from_tags_lst(tags):
    ids = get_links_ids_from_tags_lst(tags)
    return [(get_link_data_from_id(id)) for id in ids]
license: gpl-3.0
hash: -3,530,685,358,061,000,700
line_mean: 26.651163
line_max: 107
alpha_frac: 0.606392
autogenerated: false
repo_name: conda-forge/conda-forge-webservices
path: conda_forge_webservices/webapp.py
copies: 1
size: 28798
content:
import os import asyncio import tornado.escape import tornado.httpserver import tornado.ioloop import tornado.web import tornado.locks import hmac import hashlib import json from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor import atexit # import functools import logging import requests import github from datetime import datetime import conda_forge_webservices.linting as linting import conda_forge_webservices.feedstocks_service as feedstocks_service import conda_forge_webservices.update_teams as update_teams import conda_forge_webservices.commands as commands from conda_forge_webservices.update_me import get_current_versions from conda_forge_webservices.feedstock_outputs import ( validate_feedstock_outputs, copy_feedstock_outputs, is_valid_feedstock_token, comment_on_outputs_copy, ) from conda_forge_webservices import status_monitor STATUS_DATA_LOCK = tornado.locks.Lock() LOGGER = logging.getLogger("conda_forge_webservices") POOL = None def _worker_pool(): global POOL if POOL is None: if "PYTEST_CURRENT_TEST" in os.environ: # needed for mocks in testing POOL = ThreadPoolExecutor(max_workers=2) else: POOL = ProcessPoolExecutor(max_workers=2) return POOL def _shutdown_worker_pool(): global POOL if POOL is not None: POOL.shutdown(wait=False) atexit.register(_shutdown_worker_pool) THREAD_POOL = None def _thread_pool(): global THREAD_POOL if THREAD_POOL is None: THREAD_POOL = ThreadPoolExecutor(max_workers=2) return THREAD_POOL def _shutdown_thread_pool(): global THREAD_POOL if THREAD_POOL is not None: THREAD_POOL.shutdown(wait=False) atexit.register(_shutdown_thread_pool) def get_commit_message(full_name, commit): return ( github.Github(os.environ['GH_TOKEN']) .get_repo(full_name) .get_commit(commit) .commit .message) def print_rate_limiting_info_for_token(token, user): # Compute some info about our GitHub API Rate Limit. # Note that it doesn't count against our limit to # get this info. So, we should be doing this regularly # to better know when it is going to run out. Also, # this will help us better understand where we are # spending it and how to better optimize it. 
# Get GitHub API Rate Limit usage and total gh = github.Github(token) gh_api_remaining = gh.get_rate_limit().core.remaining gh_api_total = gh.get_rate_limit().core.limit # Compute time until GitHub API Rate Limit reset gh_api_reset_time = gh.get_rate_limit().core.reset gh_api_reset_time -= datetime.utcnow() msg = "{user} - remaining {remaining} out of {total}.".format( remaining=gh_api_remaining, total=gh_api_total, user=user, ) LOGGER.info( "github api requests: %s - %s", msg, "Will reset in {time}.".format(time=gh_api_reset_time) ) def print_rate_limiting_info(): d = [(os.environ['GH_TOKEN'], "conda-forge-linter")] LOGGER.info("") LOGGER.info("GitHub API Rate Limit Info:") for k, v in d: print_rate_limiting_info_for_token(k, v) LOGGER.info("") def valid_request(body, signature): our_hash = hmac.new( os.environ['CF_WEBSERVICES_TOKEN'].encode('utf-8'), body, hashlib.sha1, ).hexdigest() their_hash = signature.split("=")[1] return hmac.compare_digest(their_hash, our_hash) class LintingHookHandler(tornado.web.RequestHandler): async def post(self): headers = self.request.headers event = headers.get('X-GitHub-Event', None) if not valid_request( self.request.body, headers.get('X-Hub-Signature', ''), ): self.set_status(403) self.write_error(403) return if event == 'ping': self.write('pong') elif event == 'pull_request': body = tornado.escape.json_decode(self.request.body) repo_name = body['repository']['name'] owner = body['repository']['owner']['login'] pr_id = int(body['pull_request']['number']) is_open = body['pull_request']['state'] == 'open' if ( owner != 'conda-forge' or not ( repo_name == 'staged-recipes' or repo_name.endswith("-feedstock") ) ): self.set_status(404) self.write_error(404) return if body["action"] not in ["opened", "reopened", "synchronize", "unlocked"]: return if repo_name == 'staged-recipes': stale = any( label['name'] == 'stale' for label in body['pull_request']['labels'] ) else: stale = False # Only do anything if we are working with conda-forge, # and an open PR. if is_open and owner == 'conda-forge' and not stale: LOGGER.info("") LOGGER.info("===================================================") LOGGER.info("linting: %s", body['repository']['full_name']) LOGGER.info("===================================================") lint_info = await tornado.ioloop.IOLoop.current().run_in_executor( _worker_pool(), linting.compute_lint_message, owner, repo_name, pr_id, repo_name == 'staged-recipes', ) if lint_info: msg = linting.comment_on_pr( owner, repo_name, pr_id, lint_info['message'], search='conda-forge-linting service', ) linting.set_pr_status( owner, repo_name, lint_info, target_url=msg.html_url, ) print_rate_limiting_info() else: LOGGER.info('Unhandled event "{}".'.format(event)) self.set_status(404) self.write_error(404) class UpdateFeedstockHookHandler(tornado.web.RequestHandler): async def post(self): headers = self.request.headers event = headers.get('X-GitHub-Event', None) if not valid_request( self.request.body, headers.get('X-Hub-Signature', ''), ): self.set_status(403) self.write_error(403) return if event == 'ping': self.write('pong') return elif event == 'push': body = tornado.escape.json_decode(self.request.body) repo_name = body['repository']['name'] owner = body['repository']['owner']['login'] ref = body['ref'] commit = body.get('head_commit', {}).get('id', None) if commit: commit_msg = get_commit_message( body['repository']['full_name'], commit, ) else: commit_msg = "" # Only do anything if we are working with conda-forge, and a # push to master. 
if ( owner == 'conda-forge' and ref == "refs/heads/master" and "[cf admin skip feedstocks]" not in commit_msg and "[cf admin skip]" not in commit_msg ): LOGGER.info("") LOGGER.info("===================================================") LOGGER.info("feedstocks service: %s", body['repository']['full_name']) LOGGER.info("===================================================") handled = await tornado.ioloop.IOLoop.current().run_in_executor( _worker_pool(), feedstocks_service.handle_feedstock_event, owner, repo_name, ) if handled: print_rate_limiting_info() return else: LOGGER.info('Unhandled event "{}".'.format(event)) self.set_status(404) self.write_error(404) class UpdateTeamHookHandler(tornado.web.RequestHandler): async def post(self): headers = self.request.headers event = headers.get('X-GitHub-Event', None) if not valid_request( self.request.body, headers.get('X-Hub-Signature', ''), ): self.set_status(403) self.write_error(403) return if event == 'ping': self.write('pong') return elif event == 'push': body = tornado.escape.json_decode(self.request.body) repo_name = body['repository']['name'] owner = body['repository']['owner']['login'] ref = body['ref'] commit = body.get('head_commit', {}).get('id', None) if commit: commit_msg = get_commit_message( body['repository']['full_name'], commit, ) else: commit_msg = "" # Only do anything if we are working with conda-forge, # and a push to master. if ( owner == 'conda-forge' and repo_name.endswith("-feedstock") and ref == "refs/heads/master" and "[cf admin skip teams]" not in commit_msg and "[cf admin skip]" not in commit_msg ): LOGGER.info("") LOGGER.info("===================================================") LOGGER.info("updating team: %s", body['repository']['full_name']) LOGGER.info("===================================================") await tornado.ioloop.IOLoop.current().run_in_executor( _thread_pool(), # always threads due to expensive lru_cache update_teams.update_team, owner, repo_name, commit, ) print_rate_limiting_info() return else: LOGGER.info('Unhandled event "{}".'.format(event)) self.set_status(404) self.write_error(404) class CommandHookHandler(tornado.web.RequestHandler): async def post(self): headers = self.request.headers event = headers.get('X-GitHub-Event', None) if not valid_request( self.request.body, headers.get('X-Hub-Signature', ''), ): self.set_status(403) self.write_error(403) return if event == 'ping': self.write('pong') return elif ( event == 'pull_request_review' or event == 'pull_request' or event == 'pull_request_review_comment' ): body = tornado.escape.json_decode(self.request.body) action = body["action"] repo_name = body['repository']['name'] owner = body['repository']['owner']['login'] # Only do anything if we are working with conda-forge if ( owner != 'conda-forge' or not ( repo_name == "staged-recipes" or repo_name.endswith("-feedstock") ) ): self.set_status(404) self.write_error(404) return pr_repo = body['pull_request']['head']['repo'] pr_owner = pr_repo['owner']['login'] pr_repo = pr_repo['name'] pr_branch = body['pull_request']['head']['ref'] pr_num = body['pull_request']['number'] comment = None if event == 'pull_request_review' and action != 'dismissed': comment = body['review']['body'] elif ( event == 'pull_request' and action in ['opened', 'edited', 'reopened'] ): comment = body['pull_request']['body'] elif ( event == 'pull_request_review_comment' and action != 'deleted' ): comment = body['comment']['body'] if comment: LOGGER.info("") LOGGER.info("===================================================") 
LOGGER.info("PR command: %s", body['repository']['full_name']) LOGGER.info("===================================================") await tornado.ioloop.IOLoop.current().run_in_executor( _worker_pool(), commands.pr_detailed_comment, owner, repo_name, pr_owner, pr_repo, pr_branch, pr_num, comment, ) print_rate_limiting_info() return elif event == 'issue_comment' or event == "issues": body = tornado.escape.json_decode(self.request.body) action = body["action"] repo_name = body['repository']['name'] owner = body['repository']['owner']['login'] issue_num = body['issue']['number'] # Only do anything if we are working with conda-forge if ( owner != 'conda-forge' or not ( repo_name == "staged-recipes" or repo_name.endswith("-feedstock") ) ): self.set_status(404) self.write_error(404) return pull_request = False if "pull_request" in body["issue"]: pull_request = True if pull_request and action != 'deleted': comment = body['comment']['body'] LOGGER.info("") LOGGER.info("===================================================") LOGGER.info("PR command: %s", body['repository']['full_name']) LOGGER.info("===================================================") await tornado.ioloop.IOLoop.current().run_in_executor( _worker_pool(), commands.pr_comment, owner, repo_name, issue_num, comment, ) print_rate_limiting_info() return if ( not pull_request and action in ['opened', 'edited', 'created', 'reopened'] ): title = body['issue']['title'] if event == "issues" else "" if 'comment' in body: comment = body['comment']['body'] else: comment = body['issue']['body'] LOGGER.info("") LOGGER.info("===================================================") LOGGER.info("issue command: %s", body['repository']['full_name']) LOGGER.info("===================================================") await tornado.ioloop.IOLoop.current().run_in_executor( _worker_pool(), commands.issue_comment, owner, repo_name, issue_num, title, comment, ) print_rate_limiting_info() return else: LOGGER.info('Unhandled event "{}".'.format(event)) self.set_status(404) self.write_error(404) class UpdateWebservicesVersionsHandler(tornado.web.RequestHandler): async def get(self): self.write(json.dumps(get_current_versions())) def _repo_exists(feedstock): r = requests.get("https://github.com/conda-forge/%s" % feedstock) if r.status_code != 200: return False else: return True class OutputsValidationHandler(tornado.web.RequestHandler): """This is a stub that we keep around so that old CI jobs still work if they have not bveen rerendered. 
We should remove it eventually.""" async def post(self): self.write(json.dumps({"deprecated": True})) def _do_copy(feedstock, outputs, channel, git_sha, comment_on_error): valid, errors = validate_feedstock_outputs( feedstock, outputs, ) outputs_to_copy = {} for o in valid: if valid[o]: outputs_to_copy[o] = outputs[o] if outputs_to_copy: copied = copy_feedstock_outputs( outputs_to_copy, channel, delete=False, ) # send for github releases copy if True: try: gh = github.Github(os.environ["GH_TOKEN"]) repo = gh.get_repo("conda-forge/repodata-shards") for dist in copied: if not copied[dist]: continue _subdir, _pkg = os.path.split(dist) if channel == "main": _url = f"https://conda.anaconda.org/cf-staging/{dist}" else: _url = ( "https://conda.anaconda.org/cf-staging/label/" + f"{channel}/{dist}" ) repo.create_repository_dispatch( "release", { "artifact_url": _url, "md5": outputs_to_copy[dist], "subdir": _subdir, "package": _pkg, "url": _url, "feedstock": feedstock, "label": channel, "git_sha": git_sha, "comment_on_error": comment_on_error, } ) LOGGER.info(" artifact %s sent for copy", dist) except Exception as e: LOGGER.info( " repo dispatch for artifact copy failed: %s", repr(e) ) else: copied = {} for o in outputs: if o not in copied: copied[o] = False if not all(copied[o] for o in outputs) and comment_on_error: comment_on_outputs_copy( feedstock, git_sha, errors, valid, copied) return valid, errors, copied class OutputsCopyHandler(tornado.web.RequestHandler): async def post(self): headers = self.request.headers feedstock_token = headers.get('FEEDSTOCK_TOKEN', None) data = tornado.escape.json_decode(self.request.body) feedstock = data.get("feedstock", None) outputs = data.get("outputs", None) channel = data.get("channel", None) git_sha = data.get("git_sha", None) # the old default was to comment only if the git sha was not None # so we keep that here comment_on_error = data.get("comment_on_error", git_sha is not None) LOGGER.info("") LOGGER.info("===================================================") LOGGER.info("copy outputs for feedstock '%s'" % feedstock) LOGGER.info("===================================================") if feedstock is not None and len(feedstock) > 0: feedstock_exists = _repo_exists(feedstock) else: feedstock_exists = False valid_token = False if ( feedstock_exists and feedstock_token is not None and len(feedstock_token) > 0 and is_valid_feedstock_token( "conda-forge", feedstock, feedstock_token ) ): valid_token = True if ( (not feedstock_exists) or outputs is None or channel is None or (not valid_token) ): LOGGER.warning(' invalid outputs copy request for %s!' 
% feedstock) LOGGER.warning(' feedstock exists: %s' % feedstock_exists) LOGGER.warning(' outputs: %s' % outputs) LOGGER.warning(' channel: %s' % channel) LOGGER.warning(' valid token: %s' % valid_token) err_msgs = [] if outputs is None: err_msgs.append("no outputs data sent for copy") if channel is None: err_msgs.append("no channel sent for copy") if not valid_token: err_msgs.append("invalid feedstock token") if feedstock_exists and comment_on_error: comment_on_outputs_copy( feedstock, git_sha, err_msgs, {}, {} ) self.set_status(403) self.write_error(403) else: ( valid, errors, copied, ) = await tornado.ioloop.IOLoop.current().run_in_executor( _worker_pool(), _do_copy, feedstock, outputs, channel, git_sha, comment_on_error, ) if not all(v for v in copied.values()): self.set_status(403) self.write(json.dumps( {"errors": errors, "valid": valid, "copied": copied})) LOGGER.info(" errors: %s", errors) LOGGER.info(" valid: %s", valid) LOGGER.info(" copied: %s", copied) print_rate_limiting_info() return # code to pass everything through # not used but can be to turn it all off if we need to # if outputs is not None and channel is not None: # copied = await tornado.ioloop.IOLoop.current().run_in_executor( # _worker_pool(), # copy_feedstock_outputs, # outputs, # channel, # ) # # if not all(v for v in copied.values()): # self.set_status(403) # # if git_sha is not None and not all(copied[o] for o in outputs): # comment_on_outputs_copy( # feedstock, git_sha, ["some outputs did not copy"], {}, copied) # # self.write(json.dumps( # {"errors": ["some outputs did not copy"], # "valid": {}, # "copied": copied})) # # LOGGER.info(" errors: %s", ["some outputs did not copy"]) # LOGGER.info(" valid: %s", {}) # LOGGER.info(" copied: %s", copied) # # else: # if git_sha is not None and feedstock is not None: # comment_on_outputs_copy( # feedstock, git_sha, # ["invalid copy request (either bad data or bad feedstock token)"], # {}, {} # ) # self.set_status(403) # self.write_error(403) # # return class StatusMonitorPayloadHookHandler(tornado.web.RequestHandler): async def post(self): headers = self.request.headers event = headers.get('X-GitHub-Event', None) if not valid_request( self.request.body, headers.get('X-Hub-Signature', ''), ): self.set_status(403) self.write_error(403) return if event == 'ping': self.write('pong') return body = tornado.escape.json_decode(self.request.body) if event == 'check_run': LOGGER.info("") LOGGER.info("===================================================") LOGGER.info("check run: %s", body['repository']['full_name']) LOGGER.info("===================================================") async with STATUS_DATA_LOCK: status_monitor.update_data_check_run(body) return elif event == 'check_suite': self.write(event) return elif event == 'status': LOGGER.info("") LOGGER.info("===================================================") LOGGER.info("status: %s", body['repository']['full_name']) LOGGER.info("===================================================") async with STATUS_DATA_LOCK: status_monitor.update_data_status(body) return else: LOGGER.info('Unhandled event "{}".'.format(event)) self.set_status(404) self.write_error(404) class StatusMonitorAzureHandler(tornado.web.RequestHandler): async def get(self): self.add_header("Access-Control-Allow-Origin", "*") self.write(status_monitor.get_azure_status()) class StatusMonitorDBHandler(tornado.web.RequestHandler): async def get(self): self.add_header("Access-Control-Allow-Origin", "*") self.write(status_monitor.dump_report_data()) class 
StatusMonitorReportHandler(tornado.web.RequestHandler): async def get(self, name): self.add_header("Access-Control-Allow-Origin", "*") self.write(status_monitor.dump_report_data(name=name)) class StatusMonitorHandler(tornado.web.RequestHandler): async def get(self): self.write(status_monitor.render_status_index()) class AliveHandler(tornado.web.RequestHandler): async def get(self): self.add_header("Access-Control-Allow-Origin", "*") self.write(json.dumps({"status": "operational"})) def create_webapp(): application = tornado.web.Application([ (r"/conda-linting/org-hook", LintingHookHandler), (r"/conda-forge-feedstocks/org-hook", UpdateFeedstockHookHandler), (r"/conda-forge-teams/org-hook", UpdateTeamHookHandler), (r"/conda-forge-command/org-hook", CommandHookHandler), (r"/conda-webservice-update/versions", UpdateWebservicesVersionsHandler), (r"/feedstock-outputs/validate", OutputsValidationHandler), (r"/feedstock-outputs/copy", OutputsCopyHandler), (r"/status-monitor/payload", StatusMonitorPayloadHookHandler), (r"/status-monitor/azure", StatusMonitorAzureHandler), (r"/status-monitor/db", StatusMonitorDBHandler), (r"/status-monitor/report/(.*)", StatusMonitorReportHandler), (r"/status-monitor", StatusMonitorHandler), (r"/alive", AliveHandler), ]) return application async def _cache_data(): LOGGER.info("") LOGGER.info("===================================================") LOGGER.info("caching status data") LOGGER.info("===================================================") async with STATUS_DATA_LOCK: await tornado.ioloop.IOLoop.current().run_in_executor( _thread_pool(), status_monitor.cache_status_data, ) def main(): # start logging and reset the log format to make it a bit easier to read tornado.log.enable_pretty_logging() from tornado.log import LogFormatter my_log_formatter = LogFormatter(fmt='%(message)s', color=True) root_logger = logging.getLogger() root_streamhandler = root_logger.handlers[0] root_streamhandler.setFormatter(my_log_formatter) import argparse parser = argparse.ArgumentParser() parser.add_argument( "--local", help="run the webserver locally on 127.0.0.1:5000", action="store_true", ) args = parser.parse_args() application = create_webapp() http_server = tornado.httpserver.HTTPServer(application, xheaders=True) port = int(os.environ.get("PORT", 5000)) LOGGER.info("starting server") if args.local: LOGGER.info( "server address: http://127.0.0.1:5000/") http_server.listen(5000, address='127.0.0.1') else: http_server.listen(port) pcb = tornado.ioloop.PeriodicCallback( lambda: asyncio.create_task(_cache_data()), status_monitor.TIME_INTERVAL * 1000, ) pcb.start() tornado.ioloop.IOLoop.instance().start() if __name__ == "__main__": main()
license: bsd-3-clause
hash: 1,596,594,711,162,535,400
line_mean: 32.721311
line_max: 88
alpha_frac: 0.494514
autogenerated: false
repo_name: andybalaam/pepper
path: old/pepper1/src/libpepper/languagevalues.py
copies: 1
size: 5754
content:
# Copyright (C) 2011-2012 Andy Balaam and The Pepper Developers # Released under the MIT License. See the file COPYING.txt for details. from all_known import all_known from pepinterface import implements_interface from values import PepArray from values import PepBool from vals.numbers import PepInt from values import PepSymbol from values import PepType from values import PepValue from vals.basic_types.pepvariable import PepVariable from values import pep_none from usererrorexception import PepUserErrorException class PepUninitedMemberVariable( PepVariable ): """ A placeholder variable that tells us the name of a member that will be initialised during construction. """ def __init__( self, clazz, name ): PepVariable.__init__( self, clazz, name ) class PepImport( PepValue ): def __init__( self, module_name ): PepValue.__init__( self ) self.module_name = module_name def construction_args( self ): return ( self.module_name, ) def do_evaluate( self, env ): if self.module_name == "sys": import builtinmodules.pepsys env.namespace["sys"] = builtinmodules.pepsys.PepSys() else: raise PepUserErrorException( "No module named %s" % self.module_name ) self.cached_eval = self return self def ct_eval( self, env ): return self.evaluate( env ) class PepArrayLookup( PepValue ): def __init__( self, array_value, index ): PepValue.__init__( self ) self.array_value = array_value self.index = index def construction_args( self ): return ( self.array_value, self.index ) def do_evaluate( self, env ): idx = self.index.evaluate( env ) arr = self.array_value.evaluate( env ) if arr.is_known( env ): assert( idx.__class__ == PepInt ) assert( implements_interface( arr, PepArray ) ) # TODO: handle large number indices return arr.get_index( int( idx.value ) ).evaluate( env ) else: return self def is_known( self, env ): return all_known( ( self.array_value, self.index ), env ) class PepIf( PepValue ): def __init__( self, predicate, cmds_if_true, cmds_if_false ): PepValue.__init__( self ) self.predicate = predicate self.cmds_if_true = cmds_if_true self.cmds_if_false = cmds_if_false def construction_args( self ): return ( self.predicate, self.cmds_if_true, self.cmds_if_false ) def do_evaluate( self, env ): pred = self.predicate.evaluate( env ) if pred.is_known( env ): assert( pred.__class__ == PepBool ) # TODO: other types if pred.value: return self._run_commands( self.cmds_if_true, env ) elif self.cmds_if_false is not None: return self._run_commands( self.cmds_if_false, env ) else: return pep_none else: return self def is_known( self, env ): pred = self.predicate#.evaluate( env ) return ( pred.is_known( env ) and ( ( pred.value and all_known( self.cmds_if_true ) ) or ( not pred.value ) # TODO and elses known) ) ) def ct_eval( self, env ): return self.evaluate( env ) def _run_commands( self, cmds, env ): # TODO: support PepArray of statements? As well? ret = None for cmd in cmds: ret = cmd#.evaluate( env ) return ret # TODO: should we return all evaluated statements? class PepInitialisingWithWrongType( PepUserErrorException ): def __init__( self, decl_type, init_value_type ): PepUserErrorException.__init__( self, ( "Declared type is %s, but " + "initial value supplied is of type %s." 
) % ( str( decl_type ), str( init_value_type ) ) ) class PepInit( PepValue ): def __init__( self, var_type, var_name, init_value ): PepValue.__init__( self ) self.var_type = var_type self.var_name = var_name self.init_value = init_value def construction_args( self ): return ( self.var_type, self.var_name, self.init_value ) def _eval_args( self, env ): tp = self.var_type.evaluate( env ) # Don't evaluate - will need to semi-evaluate in # order to support symbol( "x" ) here? assert( self.var_name.__class__ == PepSymbol ) # TODO: not assert (namespace, name, base_sym) = self.var_name.find_namespace_and_name( env ) # TODO: don't think it's right to evaluate here - maybe ct_eval? val = self.init_value.evaluate( env ) #.evaluate( return ( tp, namespace, name, val ) def do_evaluate( self, env ): ( tp, ns, nm, val ) = self._eval_args( env ) if nm in ns: if not isinstance( ns[nm], PepUninitedMemberVariable ): raise PepUserErrorException( "Namespace already contains the name '" + nm + "'." ) val_type = val.evaluated_type( env ) if not tp.matches( val_type, env ): raise PepInitialisingWithWrongType( tp, val_type ) def make_value(): if val.is_known( env ): return val else: return PepVariable( tp.evaluate( env ), nm ) ns.overwrite( nm, make_value() ) return self def ct_eval( self, env ): return self.evaluate( env ) # TODO: not a real evaluate here def is_known( self, env ): ( tp, ns, nm, val ) = self._eval_args( env ) return all_known( ( tp, val ), env )
license: mit
hash: -1,338,129,985,425,157,000
line_mean: 31.88
line_max: 76
alpha_frac: 0.582204
autogenerated: false
repo_name: mediatum/mediatum
path: web/ftree/ftree.py
copies: 1
size: 2139
content:
""" mediatum - a multimedia content repository Copyright (C) 2010 Arne Seifert <[email protected]> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import logging from core import db from core.transition import current_user from contenttypes import Container, Content from .ftreedata import getData, getPathTo, getLabel q = db.query logg = logging.getLogger(__name__) def ftree(req): if not current_user.is_editor: logg.warn("ftree permission denied for user: %s", current_user.id) return 403 if "parentId" in req.params: return getData(req) if "pathTo" in req.params: return getPathTo(req) if "getLabel" in req.params: return getLabel(req) if "changeCheck" in req.params: for id in req.params.get("currentitem").split(","): node = q(Content).get(id) parent = q(Container).get(req.params.get("changeCheck")) if not(node and parent and node.has_write_access() and parent.has_write_access()): logg.warn("illegal ftree request: %s", req.params) return 403 if node in parent.content_children: if len(node.parents) > 1: parent.content_children.remove(node) logg.info("ftree change ") db.session.commit() else: req.writeTALstr('<tal:block i18n:translate="edit_classes_noparent"/>', {}) else: parent.content_children.append(node) db.session.commit()
license: gpl-3.0
hash: -1,403,158,157,199,159,800
line_mean: 33.5
line_max: 94
alpha_frac: 0.649836
autogenerated: false
repo_name: Desolace/LudumDare32
path: game_loop.py
copies: 1
size: 4227
content:
import pygame from pygame.locals import * from input_manager import InputManager, Actions import screens, menu from game_instance import GameInstance from ui_overlay import UIOverlay, TextElement from pyconsole import Console class Game(object): def __init__(self, config): self.max_fps = config.get("max_fps", 60) pygame.init() pygame.display.set_caption(config["title"]) pygame.mixer.init(frequency=44100, size=-16, channels=2, buffer=4096) self.display = pygame.display.set_mode((config["width"], config["height"])) self.world = GameInstance(config, "first") self.clock = pygame.time.Clock() self.input_manager = InputManager() self.ui_overlay = UIOverlay(config["font_file"]) self.ui_overlay.text_elements["framerate"] = TextElement((20, 50), 20, (0, 0, 0), "0 fps") self.screens = { "pause":screens.PauseScreen(config), "inventory":menu.InventoryMenu(config), "complete":screens.CompleteScreen(config), "gameover":screens.GameOverScreen(config) } self.main_menu = menu.MainMenu(config, enabled=True) self.paused = False self.show_fps = config.get("show_fps", False) self.run_loop = self.debug_loop self.is_running = True if config.get("use_debug_console", False): self.console = Console(self.display, (0, 0, self.display.get_width(), self.display.get_height() / 2), vars={'game':self}) self.console.set_active(False) self.console.setvar("python_mode", True) self.console.set_interpreter() def debug_loop(self): events = self.input_manager.get_active_events() #user closes the game if Actions.QUIT in events: pygame.quit() self.is_running = False return elif Actions.START_GAME in events: self.run_loop = self.in_game_loop delta = self.clock.tick(self.max_fps) / 1000.0 self.main_menu.doFrame(self.display, delta, events) pygame.display.flip() def toggle_paused_screen(self, screen_name): self.paused = not self.paused self.screens[screen_name].enabled = not self.screens[screen_name].enabled def in_game_loop(self): if hasattr(self, 'console'): self.console.process_input() events = self.input_manager.get_active_events() #user closes the game if Actions.QUIT in events: pygame.quit() self.is_running = False return elif Actions.GAME_WON in events: self.toggle_paused_screen("complete") elif Actions.GAME_OVER in events: self.toggle_paused_screen("gameover") #toggle relevent ui screens elif Actions.TOGGLE_PAUSE in events: self.toggle_paused_screen("pause") elif Actions.TOGGLE_INVENTORY in events: self.toggle_paused_screen("inventory") elif Actions.TOGGLE_SHOW_FPS in events: self.show_fps = not self.show_fps elif Actions.SHOW_CONSOLE in events: if hasattr(self, 'console'): self.console.set_active() delta = self.clock.tick(self.max_fps) / 1000.0 if self.show_fps: self.ui_overlay.text_elements["framerate"].value = "{0} fps".format(int(self.clock.get_fps())) else: self.ui_overlay.text_elements["framerate"].value = "" self.ui_overlay.update(delta) #render the game field, a delta of 0 means don't do any physics updates, events of [] means dont perform any inputs if self.paused: self.world.doFrame(self.display, 0, []) else: self.world.doFrame(self.display, delta, events) #display any active ui screens for screen in self.screens.values(): screen.doFrame(self.display, delta, events) #render the app-scope UI for (label, position, _) in self.ui_overlay.get_drawables(): self.display.blit(label, position) if hasattr(self, 'console'): self.console.draw() #give it to the user pygame.display.flip()
license: mit
hash: 8,239,904,623,314,838,000
line_mean: 36.078947
line_max: 133
alpha_frac: 0.610599
autogenerated: false
repo_name: dhcrzf/zulip
path: zerver/tests/test_templates.py
copies: 1
size: 15195
content:
# -*- coding: utf-8 -*- import os import re from typing import Any, Dict, Iterable import logging from django.conf import settings from django.test import override_settings from django.template import Template, Context from django.template.loader import get_template from django.test.client import RequestFactory from zerver.lib.exceptions import InvalidMarkdownIncludeStatement from zerver.lib.test_helpers import get_all_templates from zerver.lib.test_classes import ( ZulipTestCase, ) from zerver.lib.test_runner import slow from zerver.context_processors import common_context class get_form_value: def __init__(self, value: Any) -> None: self._value = value def value(self) -> Any: return self._value class DummyForm(Dict[str, Any]): pass class TemplateTestCase(ZulipTestCase): """ Tests that backend template rendering doesn't crash. This renders all the Zulip backend templates, passing dummy data as the context, which allows us to verify whether any of the templates are broken enough to not render at all (no verification is done that the output looks right). Please see `get_context` function documentation for more information. """ @slow("Tests a large number of different templates") @override_settings(TERMS_OF_SERVICE=None) def test_templates(self) -> None: # Just add the templates whose context has a conflict with other # templates' context in `defer`. defer = ['analytics/activity.html'] # Django doesn't send template_rendered signal for parent templates # https://code.djangoproject.com/ticket/24622 covered = [ 'zerver/portico.html', 'zerver/portico_signup.html', ] logged_out = [ 'confirmation/confirm.html', # seems unused 'zerver/compare.html', 'zerver/footer.html', ] logged_in = [ 'analytics/stats.html', 'zerver/drafts.html', 'zerver/home.html', 'zerver/invite_user.html', 'zerver/keyboard_shortcuts.html', 'zerver/left_sidebar.html', 'zerver/landing_nav.html', 'zerver/logout.html', 'zerver/markdown_help.html', 'zerver/navbar.html', 'zerver/right_sidebar.html', 'zerver/search_operators.html', 'zerver/settings_overlay.html', 'zerver/settings_sidebar.html', 'zerver/stream_creation_prompt.html', 'zerver/subscriptions.html', 'zerver/message_history.html', 'zerver/delete_message.html', ] unusual = [ 'zerver/emails/confirm_new_email.subject.txt', 'zerver/emails/compiled/confirm_new_email.html', 'zerver/emails/confirm_new_email.txt', 'zerver/emails/notify_change_in_email.subject.txt', 'zerver/emails/compiled/notify_change_in_email.html', 'zerver/emails/digest.subject.txt', 'zerver/emails/digest.html', 'zerver/emails/digest.txt', 'zerver/emails/followup_day1.subject.txt', 'zerver/emails/compiled/followup_day1.html', 'zerver/emails/followup_day1.txt', 'zerver/emails/followup_day2.subject.txt', 'zerver/emails/followup_day2.txt', 'zerver/emails/compiled/followup_day2.html', 'zerver/emails/compiled/password_reset.html', 'corporate/mit.html', 'corporate/zephyr.html', 'corporate/zephyr-mirror.html', 'pipeline/css.jinja', 'pipeline/inline_js.jinja', 'pipeline/js.jinja', 'zilencer/enterprise_tos_accept_body.txt', 'zerver/zulipchat_migration_tos.html', 'zilencer/enterprise_tos_accept_body.txt', 'zerver/invalid_email.html', 'zerver/topic_is_muted.html', 'zerver/bankruptcy.html', 'zerver/lightbox_overlay.html', 'zerver/invalid_realm.html', 'zerver/compose.html', 'zerver/debug.html', 'zerver/base.html', 'zerver/api_content.json', 'zerver/handlebars_compilation_failed.html', 'zerver/portico-header.html', 'zerver/deprecation_notice.html', 'two_factor/_wizard_forms.html', ] integrations_regexp = 
re.compile('zerver/integrations/.*.html') # Since static/generated/bots/ is searched by Jinja2 for templates, # it mistakes logo files under that directory for templates. bot_logos_regexp = re.compile(r'\w+\/logo\.(svg|png)$') skip = covered + defer + logged_out + logged_in + unusual + ['tests/test_markdown.html', 'zerver/terms.html', 'zerver/privacy.html'] templates = [t for t in get_all_templates() if not ( t in skip or integrations_regexp.match(t) or bot_logos_regexp.match(t))] self.render_templates(templates, self.get_context()) # Test the deferred templates with updated context. update = {'data': [('one', 'two')]} self.render_templates(defer, self.get_context(**update)) def render_templates(self, templates: Iterable[str], context: Dict[str, Any]) -> None: for template_name in templates: template = get_template(template_name) try: template.render(context) except Exception: # nocoverage # nicer error handler logging.error("Exception while rendering '{}'".format(template.template.name)) raise def get_context(self, **kwargs: Any) -> Dict[str, Any]: """Get the dummy context for shallow testing. The context returned will always contain a parameter called `shallow_tested`, which tells the signal receiver that the test was not rendered in an actual logical test (so we can still do coverage reporting on which templates have a logical test). Note: `context` just holds dummy values used to make the test pass. This context only ensures that the templates do not throw a 500 error when rendered using dummy data. If new required parameters are added to a template, this test will fail; the usual fix is to just update the context below to add the new parameter to the dummy data. :param kwargs: Keyword arguments can be used to update the base context. """ user_profile = self.example_user('hamlet') email = user_profile.email context = dict( sidebar_index="zerver/help/include/sidebar_index.md", doc_root="/help/", article="zerver/help/index.md", shallow_tested=True, user_profile=user_profile, user=user_profile, form=DummyForm( full_name=get_form_value('John Doe'), terms=get_form_value(True), email=get_form_value(email), emails=get_form_value(email), subdomain=get_form_value("zulip"), next_param=get_form_value("billing") ), current_url=lambda: 'www.zulip.com', integrations_dict={}, referrer=dict( full_name='John Doe', realm=dict(name='zulip.com'), ), message_count=0, messages=[dict(header='Header')], new_streams=dict(html=''), data=dict(title='Title'), device_info={"device_browser": "Chrome", "device_os": "Windows", "device_ip": "127.0.0.1", "login_time": "9:33am NewYork, NewYork", }, api_uri_context={}, cloud_annual_price=80, seat_count=8, request=RequestFactory().get("/") ) context.update(kwargs) return context def test_markdown_in_template(self) -> None: template = get_template("tests/test_markdown.html") context = { 'markdown_test_file': "zerver/tests/markdown/test_markdown.md" } content = template.render(context) content_sans_whitespace = content.replace(" ", "").replace('\n', '') self.assertEqual(content_sans_whitespace, 'header<h1id="hello">Hello!</h1><p>Thisissome<em>boldtext</em>.</p>footer') def test_markdown_tabbed_sections_extension(self) -> None: template = get_template("tests/test_markdown.html") context = { 'markdown_test_file': "zerver/tests/markdown/test_tabbed_sections.md" } content = template.render(context) content_sans_whitespace = content.replace(" ", "").replace('\n', '') # Note that the expected HTML has a lot of stray <p> tags. 
This is a # consequence of how the Markdown renderer converts newlines to HTML # and how elements are delimited by newlines and so forth. However, # stray <p> tags are usually matched with closing tags by HTML renderers # so this doesn't affect the final rendered UI in any visible way. expected_html = """ header <h1 id="heading">Heading</h1> <p> <div class="code-section" markdown="1"> <ul class="nav"> <li data-language="ios">iOS</li> <li data-language="desktop-web">Desktop/Web</li> </ul> <div class="blocks"> <div data-language="ios" markdown="1"></p> <p>iOS instructions</p> <p></div> <div data-language="desktop-web" markdown="1"></p> <p>Desktop/browser instructions</p> <p></div> </div> </div> </p> <h2 id="heading-2">Heading 2</h2> <p> <div class="code-section" markdown="1"> <ul class="nav"> <li data-language="desktop-web">Desktop/Web</li> <li data-language="android">Android</li> </ul> <div class="blocks"> <div data-language="desktop-web" markdown="1"></p> <p>Desktop/browser instructions</p> <p></div> <div data-language="android" markdown="1"></p> <p>Android instructions</p> <p></div> </div> </div> </p> footer """ expected_html_sans_whitespace = expected_html.replace(" ", "").replace('\n', '') self.assertEqual(content_sans_whitespace, expected_html_sans_whitespace) def test_encoded_unicode_decimals_in_markdown_template(self) -> None: template = get_template("tests/test_unicode_decimals.html") context = {'unescape_rendered_html': False} content = template.render(context) content_sans_whitespace = content.replace(" ", "").replace('\n', '') self.assertEqual(content_sans_whitespace, 'header<p>&#123;&#125;</p>footer') context = {'unescape_rendered_html': True} content = template.render(context) content_sans_whitespace = content.replace(" ", "").replace('\n', '') self.assertEqual(content_sans_whitespace, 'header<p>{}</p>footer') def test_markdown_nested_code_blocks(self) -> None: template = get_template("tests/test_markdown.html") context = { 'markdown_test_file': "zerver/tests/markdown/test_nested_code_blocks.md" } content = template.render(context) content_sans_whitespace = content.replace(" ", "").replace('\n', '') expected = ('header<h1id="this-is-a-heading">Thisisaheading.</h1><ol>' '<li><p>Alistitemwithanindentedcodeblock:</p><divclass="codehilite">' '<pre>indentedcodeblockwithmultiplelines</pre></div></li></ol>' '<divclass="codehilite"><pre><span></span>' 'non-indentedcodeblockwithmultiplelines</pre></div>footer') self.assertEqual(content_sans_whitespace, expected) def test_custom_markdown_include_extension(self) -> None: template = get_template("tests/test_markdown.html") context = { 'markdown_test_file': "zerver/tests/markdown/test_custom_include_extension.md" } with self.assertRaisesRegex(InvalidMarkdownIncludeStatement, "Invalid markdown include statement"): template.render(context) def test_custom_markdown_include_extension_empty_macro(self) -> None: template = get_template("tests/test_markdown.html") context = { 'markdown_test_file': "zerver/tests/markdown/test_custom_include_extension_empty.md" } content = template.render(context) content_sans_whitespace = content.replace(" ", "").replace('\n', '') expected = 'headerfooter' self.assertEqual(content_sans_whitespace, expected) def test_custom_tos_template(self) -> None: response = self.client_get("/terms/") self.assert_in_success_response([u"Thanks for using our products and services (\"Services\"). 
", u"By using our Services, you are agreeing to these terms"], response) def test_custom_terms_of_service_template(self) -> None: not_configured_message = 'This installation of Zulip does not have a configured ' \ 'terms of service' with self.settings(TERMS_OF_SERVICE=None): response = self.client_get('/terms/') self.assert_in_success_response([not_configured_message], response) with self.settings(TERMS_OF_SERVICE='zerver/tests/markdown/test_markdown.md'): response = self.client_get('/terms/') self.assert_in_success_response(['This is some <em>bold text</em>.'], response) self.assert_not_in_success_response([not_configured_message], response) def test_custom_privacy_policy_template(self) -> None: not_configured_message = 'This installation of Zulip does not have a configured ' \ 'privacy policy' with self.settings(PRIVACY_POLICY=None): response = self.client_get('/privacy/') self.assert_in_success_response([not_configured_message], response) with self.settings(PRIVACY_POLICY='zerver/tests/markdown/test_markdown.md'): response = self.client_get('/privacy/') self.assert_in_success_response(['This is some <em>bold text</em>.'], response) self.assert_not_in_success_response([not_configured_message], response) def test_custom_privacy_policy_template_with_absolute_url(self) -> None: current_dir = os.path.dirname(os.path.abspath(__file__)) abs_path = os.path.join(current_dir, '..', '..', 'templates/zerver/tests/markdown/test_markdown.md') with self.settings(PRIVACY_POLICY=abs_path): response = self.client_get('/privacy/') self.assert_in_success_response(['This is some <em>bold text</em>.'], response)
license: apache-2.0
hash: -363,827,943,029,641,300
line_mean: 40.067568
line_max: 107
alpha_frac: 0.595656
autogenerated: false
repo_name: mbr/simplekv
path: tests/test_boto3_store.py
copies: 1
size: 2563
content:
#!/usr/bin/env python import os import pytest boto3 = pytest.importorskip('boto3') from simplekv.net.boto3store import Boto3Store from simplekv._compat import BytesIO from basic_store import BasicStore from url_store import UrlStore from bucket_manager import boto_credentials, boto3_bucket from conftest import ExtendedKeyspaceTests from simplekv.contrib import ExtendedKeyspaceMixin @pytest.fixture(params=boto_credentials, ids=[c['access_key'] for c in boto_credentials]) def credentials(request): return request.param @pytest.yield_fixture() def bucket(credentials): with boto3_bucket(**credentials) as bucket: yield bucket class TestBoto3Storage(BasicStore, UrlStore): @pytest.fixture(params=[True, False]) def reduced_redundancy(self, request): return request.param @pytest.fixture def storage_class(self, reduced_redundancy): return 'REDUCED_REDUNDANCY' if reduced_redundancy else None @pytest.fixture(params=['', '/test-prefix']) def prefix(self, request): return request.param @pytest.fixture def store(self, bucket, prefix, reduced_redundancy): return Boto3Store(bucket, prefix, reduced_redundancy=reduced_redundancy) # Disable max key length test as it leads to problems with minio test_max_key_length = None def test_get_filename_nonexistant(self, store, key, tmp_path): with pytest.raises(KeyError): store.get_file(key, os.path.join(str(tmp_path), 'a')) def test_key_error_on_nonexistant_get_filename(self, store, key, tmp_path): with pytest.raises(KeyError): store.get_file(key, os.path.join(str(tmp_path), 'a')) def test_storage_class_put( self, store, prefix, key, value, storage_class, bucket ): store.put(key, value) obj = bucket.Object(prefix.lstrip('/') + key) assert obj.storage_class == storage_class def test_storage_class_putfile( self, store, prefix, key, value, storage_class, bucket ): store.put_file(key, BytesIO(value)) obj = bucket.Object(prefix.lstrip('/') + key) assert obj.storage_class == storage_class class TestExtendedKeyspaceBoto3Store(TestBoto3Storage, ExtendedKeyspaceTests): @pytest.fixture def store(self, bucket, prefix, reduced_redundancy): class ExtendedKeyspaceStore(ExtendedKeyspaceMixin, Boto3Store): pass return ExtendedKeyspaceStore(bucket, prefix, reduced_redundancy=reduced_redundancy)
license: mit
hash: -8,313,828,683,773,980,000
line_mean: 31.443038
line_max: 80
alpha_frac: 0.690207
autogenerated: false
repo_name: lmazuel/azure-sdk-for-python
path: azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/virtual_network_usage.py
copies: 1
size: 1905
content:
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class VirtualNetworkUsage(Model): """Usage details for subnet. Variables are only populated by the server, and will be ignored when sending a request. :ivar current_value: Indicates number of IPs used from the Subnet. :vartype current_value: float :ivar id: Subnet identifier. :vartype id: str :ivar limit: Indicates the size of the subnet. :vartype limit: float :ivar name: The name containing common and localized value for usage. :vartype name: ~azure.mgmt.network.v2017_06_01.models.VirtualNetworkUsageName :ivar unit: Usage units. Returns 'Count' :vartype unit: str """ _validation = { 'current_value': {'readonly': True}, 'id': {'readonly': True}, 'limit': {'readonly': True}, 'name': {'readonly': True}, 'unit': {'readonly': True}, } _attribute_map = { 'current_value': {'key': 'currentValue', 'type': 'float'}, 'id': {'key': 'id', 'type': 'str'}, 'limit': {'key': 'limit', 'type': 'float'}, 'name': {'key': 'name', 'type': 'VirtualNetworkUsageName'}, 'unit': {'key': 'unit', 'type': 'str'}, } def __init__(self, **kwargs): super(VirtualNetworkUsage, self).__init__(**kwargs) self.current_value = None self.id = None self.limit = None self.name = None self.unit = None
license: mit
hash: 1,272,538,325,083,842,600
line_mean: 33.017857
line_max: 76
alpha_frac: 0.571654
autogenerated: false
repo_name: jgarvin/Cogflect
path: cog-recipes/cogflect/CppClass.py
copies: 1
size: 5191
content:
#!/usr/bin/env python from cogflect.GeneratorBase import GeneratorBase from cogflect.common import generate_cppclass_common from cogflect.util import sanitizeTypename, indent import cog _body = """ typedef %(name)s::type enum_type; // TODO: iterate public/protected/private independently? // TODO: iterate over members with a tag? <-- public/protected/private tags // TODO: tag to indicate reflection is available template<typename T> typename T::type& get_member(); template<typename T> typename T::type const& get_member() const; template<unsigned index> typename %(name)s::info_index<index>::type& get_indexed_member() { return get_member< info_index<index> >(); } template<unsigned index> typename %(name)s::info_index<index>::type const& get_indexed_member() const { return get_member< info_index<index> >(); } template<typename Processor> inline void get_runtime_member(Processor& p, type value) { cogflect::pass_member_action<Processor, data> tmp(p, *this); value.switcher(tmp); } template<typename Processor> inline void get_runtime_member(Processor const& p, type value) { cogflect::pass_member_action<const Processor, data> tmp(p, *this); value.switcher(tmp); } template<typename Processor> inline void get_runtime_member(Processor& p, type value) const { cogflect::pass_member_action<Processor, const data> tmp(p, *this); value.switcher(tmp); } template<typename Processor> inline void get_runtime_member(Processor const& p, type value) const { cogflect::pass_member_action<const Processor, const data> tmp(p, *this); value.switcher(tmp); } template<typename Processor> inline void get_runtime_indexed_member(Processor& p, unsigned index) { cogflect::pass_member_action<Processor, data> tmp(p, *this); type::index_switcher(index, tmp); } template<typename Processor> inline void get_runtime_indexed_member(Processor& p, unsigned index) const { cogflect::pass_member_action<Processor, const data> tmp(p, *this); type::index_switcher(index, tmp); } template<typename Processor> inline void get_runtime_indexed_member(Processor const& p, unsigned index) { cogflect::pass_member_action<const Processor, data> tmp(p, *this); type::index_switcher(index, tmp); } template<typename Processor> inline void get_runtime_indexed_member(Processor const& p, unsigned index) const { cogflect::pass_member_action<const Processor, const data> tmp(p, *this); type::index_switcher(index, tmp); } template<class VisitorT> inline void for_all_members(VisitorT& visitor) { %(forAllMembersBody)s } template<class VisitorT> inline void for_all_members(VisitorT& visitor) const { %(forAllMembersBody)s } template<class VisitorT> inline void for_all_members(VisitorT const& visitor) { %(forAllMembersBody)s } template<class VisitorT> inline void for_all_members(VisitorT const& visitor) const { %(forAllMembersBody)s } template<class TargetType> inline void shape_assign(TargetType const& other) { cogflect::shape_assign<data, TargetType> visitor; for_all_members(visitor); } """ class CppClass(GeneratorBase): def __init__(self, name, fields, config={}): GeneratorBase.__init__(self, name, fields, config) def __gen_for_all_members(self): calls = [] call_template = ("visitor.\n" " template process_member<%s_INFO>(%s_);") for f in self.fields: calls.append(indent(call_template % (f.name, f.name.lower()), 8)) return "\n".join(calls) def generate(self): generate_cppclass_common() cog.out("namespace %s {\n" "\n" % self.name) cog.out("class data\n" "{\n" "public:") cog.out(_body % { "name" : self.name, "forAllMembersBody" : self.__gen_for_all_members() }) 
cog.out("private:\n") for f in self.fields: if f.type: cog.out(" %s %s_;\n" % (sanitizeTypename(f.type), f.name.lower())) cog.out("}; // class data\n\n") for f in self.fields: if f.type: cog.out("template<>\n" "inline %(name)s_INFO::type& data::get_member<%(name)s_INFO>()\n" "{\n" " return %(lower_name)s_;\n" "}\n\n" % { "name" : f.name, "lower_name" : f.name.lower() }) for f in self.fields: if f.type: cog.out("template<>\n" "inline %(name)s_INFO::type const& data::get_member<%(name)s_INFO>() const\n" "{\n" " return %(lower_name)s_;\n" "}\n\n" % { "name" : f.name, "lower_name" : f.name.lower() }) cog.out("} // namespace %s\n" % self.name)
license: mit
hash: 2,712,822,109,992,995,300
line_mean: 28.662857
line_max: 101
alpha_frac: 0.587555
autogenerated: false
repo_name: Hernrup/django-wsgiserver3
path: django_wsgiserver/wsgiserver/wsgiserver3.py
copies: 1
size: 77873
content:
"""A high-speed, production ready, thread pooled, generic HTTP server. Simplest example on how to use this module directly (without using CherryPy's application machinery):: from cherrypy import wsgiserver def my_crazy_app(environ, start_response): status = '200 OK' response_headers = [('Content-type','text/plain')] start_response(status, response_headers) return ['Hello world!'] server = wsgiserver.CherryPyWSGIServer( ('0.0.0.0', 8070), my_crazy_app, server_name='www.cherrypy.example') server.start() The CherryPy WSGI server can serve as many WSGI applications as you want in one instance by using a WSGIPathInfoDispatcher:: d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app}) server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d) Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance. This won't call the CherryPy engine (application side) at all, only the HTTP server, which is independent from the rest of CherryPy. Don't let the name "CherryPyWSGIServer" throw you; the name merely reflects its origin, not its coupling. For those of you wanting to understand internals of this module, here's the basic call flow. The server's listening thread runs a very tight loop, sticking incoming connections onto a Queue:: server = CherryPyWSGIServer(...) server.start() while True: tick() # This blocks until a request comes in: child = socket.accept() conn = HTTPConnection(child, ...) server.requests.put(conn) Worker threads are kept in a pool and poll the Queue, popping off and then handling each connection in turn. Each connection can consist of an arbitrary number of requests and their responses, so we run a nested loop:: while True: conn = server.requests.get() conn.communicate() -> while True: req = HTTPRequest(...) req.parse_request() -> # Read the Request-Line, e.g. "GET /page HTTP/1.1" req.rfile.readline() read_headers(req.rfile, req.inheaders) req.respond() -> response = app(...) try: for chunk in response: if chunk: req.write(chunk) finally: if hasattr(response, "close"): response.close() if req.close_connection: return """ __all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer', 'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile', 'CP_makefile', 'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert', 'WorkerThread', 'ThreadPool', 'SSLAdapter', 'CherryPyWSGIServer', 'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0', 'WSGIPathInfoDispatcher', 'get_ssl_adapter_class'] import os try: import queue except: import Queue as queue import re import email.utils import socket import sys if 'win' in sys.platform and hasattr(socket, "AF_INET6"): if not hasattr(socket, 'IPPROTO_IPV6'): socket.IPPROTO_IPV6 = 41 if not hasattr(socket, 'IPV6_V6ONLY'): socket.IPV6_V6ONLY = 27 if sys.version_info < (3, 1): import io else: import _pyio as io DEFAULT_BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE import threading import time from traceback import format_exc if sys.version_info >= (3, 0): bytestr = bytes unicodestr = str basestring = (bytes, str) def ntob(n, encoding='ISO-8859-1'): """Return the given native string as a byte string in the given encoding.""" # In Python 3, the native string type is unicode return n.encode(encoding) else: bytestr = str unicodestr = unicode basestring = basestring def ntob(n, encoding='ISO-8859-1'): """Return the given native string as a byte string in the given encoding.""" # In Python 2, the native string type is bytes. Assume it's already # in the given encoding, which for ISO-8859-1 is almost always what # was intended. 
return n LF = ntob('\n') CRLF = ntob('\r\n') TAB = ntob('\t') SPACE = ntob(' ') COLON = ntob(':') SEMICOLON = ntob(';') EMPTY = ntob('') NUMBER_SIGN = ntob('#') QUESTION_MARK = ntob('?') ASTERISK = ntob('*') FORWARD_SLASH = ntob('/') quoted_slash = re.compile(ntob("(?i)%2F")) import errno def plat_specific_errors(*errnames): """Return error numbers for all errors in errnames on this platform. The 'errno' module contains different global constants depending on the specific platform (OS). This function will return the list of numeric values for a given list of potential names. """ errno_names = dir(errno) nums = [getattr(errno, k) for k in errnames if k in errno_names] # de-dupe the list return list(dict.fromkeys(nums).keys()) socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR") socket_errors_to_ignore = plat_specific_errors( "EPIPE", "EBADF", "WSAEBADF", "ENOTSOCK", "WSAENOTSOCK", "ETIMEDOUT", "WSAETIMEDOUT", "ECONNREFUSED", "WSAECONNREFUSED", "ECONNRESET", "WSAECONNRESET", "ECONNABORTED", "WSAECONNABORTED", "ENETRESET", "WSAENETRESET", "EHOSTDOWN", "EHOSTUNREACH", ) socket_errors_to_ignore.append("timed out") socket_errors_to_ignore.append("The read operation timed out") socket_errors_nonblocking = plat_specific_errors( 'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK') comma_separated_headers = [ntob(h) for h in ['Accept', 'Accept-Charset', 'Accept-Encoding', 'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control', 'Connection', 'Content-Encoding', 'Content-Language', 'Expect', 'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE', 'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning', 'WWW-Authenticate']] import logging if not hasattr(logging, 'statistics'): logging.statistics = {} def read_headers(rfile, hdict=None): """Read headers from the given stream into the given header dict. If hdict is None, a new header dict is created. Returns the populated header dict. Headers which are repeated are folded together using a comma if their specification so dictates. This function raises ValueError when the read bytes violate the HTTP spec. You should probably return "400 Bad Request" if this happens. """ if hdict is None: hdict = {} while True: line = rfile.readline() if not line: # No more data--illegal end of headers raise ValueError("Illegal end of headers.") if line == CRLF: # Normal end of headers break if not line.endswith(CRLF): raise ValueError("HTTP requires CRLF terminators") if line[0] in (SPACE, TAB): # It's a continuation line. v = line.strip() else: try: k, v = line.split(COLON, 1) except ValueError: raise ValueError("Illegal header line.") # logger.error("400 Bad Request: Illegal header line") # ignore illegal headers instead of cherrpy blow up, add loging !!! -clm continue # TODO: what about TE and WWW-Authenticate? 
k = k.strip().title() v = v.strip() hname = k if k in comma_separated_headers: existing = hdict.get(hname) if existing: v = b", ".join((existing, v)) hdict[hname] = v return hdict class MaxSizeExceeded(Exception): pass class SizeCheckWrapper(object): """Wraps a file-like object, raising MaxSizeExceeded if too large.""" def __init__(self, rfile, maxlen): self.rfile = rfile self.maxlen = maxlen self.bytes_read = 0 def _check_length(self): if self.maxlen and self.bytes_read > self.maxlen: raise MaxSizeExceeded() def read(self, size=None): data = self.rfile.read(size) self.bytes_read += len(data) self._check_length() return data def readline(self, size=None): if size is not None: data = self.rfile.readline(size) self.bytes_read += len(data) self._check_length() return data # User didn't specify a size ... # We read the line in chunks to make sure it's not a 100MB line ! res = [] while True: data = self.rfile.readline(256) self.bytes_read += len(data) self._check_length() res.append(data) # See https://bitbucket.org/cherrypy/cherrypy/issue/421 if len(data) < 256 or data[-1:].decode() == LF: return EMPTY.join(res) def readlines(self, sizehint=0): # Shamelessly stolen from StringIO total = 0 lines = [] line = self.readline() while line: lines.append(line) total += len(line) if 0 < sizehint <= total: break line = self.readline() return lines def close(self): self.rfile.close() def __iter__(self): return self def __next__(self): data = next(self.rfile) self.bytes_read += len(data) self._check_length() return data def next(self): data = self.rfile.next() self.bytes_read += len(data) self._check_length() return data class KnownLengthRFile(object): """Wraps a file-like object, returning an empty string when exhausted.""" def __init__(self, rfile, content_length): self.rfile = rfile self.remaining = content_length def read(self, size=None): if self.remaining == 0: return b'' if size is None: size = self.remaining else: size = min(size, self.remaining) data = self.rfile.read(size) self.remaining -= len(data) return data def readline(self, size=None): if self.remaining == 0: return b'' if size is None: size = self.remaining else: size = min(size, self.remaining) data = self.rfile.readline(size) self.remaining -= len(data) return data def readlines(self, sizehint=0): # Shamelessly stolen from StringIO total = 0 lines = [] line = self.readline(sizehint) while line: lines.append(line) total += len(line) if 0 < sizehint <= total: break line = self.readline(sizehint) return lines def close(self): self.rfile.close() def __iter__(self): return self def __next__(self): data = next(self.rfile) self.remaining -= len(data) return data class ChunkedRFile(object): """Wraps a file-like object, returning an empty string when exhausted. This class is intended to provide a conforming wsgi.input value for request entities that have been encoded with the 'chunked' transfer encoding. 
""" def __init__(self, rfile, maxlen, bufsize=8192): self.rfile = rfile self.maxlen = maxlen self.bytes_read = 0 self.buffer = EMPTY self.bufsize = bufsize self.closed = False def _fetch(self): if self.closed: return line = self.rfile.readline() self.bytes_read += len(line) if self.maxlen and self.bytes_read > self.maxlen: raise MaxSizeExceeded("Request Entity Too Large", self.maxlen) line = line.strip().split(SEMICOLON, 1) try: chunk_size = line.pop(0) chunk_size = int(chunk_size, 16) except ValueError: raise ValueError("Bad chunked transfer size: " + repr(chunk_size)) if chunk_size <= 0: self.closed = True return ## if line: chunk_extension = line[0] if self.maxlen and self.bytes_read + chunk_size > self.maxlen: raise IOError("Request Entity Too Large") chunk = self.rfile.read(chunk_size) self.bytes_read += len(chunk) self.buffer += chunk crlf = self.rfile.read(2) if crlf != CRLF: raise ValueError( "Bad chunked transfer coding (expected '\\r\\n', " "got " + repr(crlf) + ")") def read(self, size=None): data = EMPTY while True: if size and len(data) >= size: return data if not self.buffer: self._fetch() if not self.buffer: # EOF return data if size: remaining = size - len(data) data += self.buffer[:remaining] self.buffer = self.buffer[remaining:] else: data += self.buffer def readline(self, size=None): data = EMPTY while True: if size and len(data) >= size: return data if not self.buffer: self._fetch() if not self.buffer: # EOF return data newline_pos = self.buffer.find(LF) if size: if newline_pos == -1: remaining = size - len(data) data += self.buffer[:remaining] self.buffer = self.buffer[remaining:] else: remaining = min(size - len(data), newline_pos) data += self.buffer[:remaining] self.buffer = self.buffer[remaining:] else: if newline_pos == -1: data += self.buffer else: data += self.buffer[:newline_pos] self.buffer = self.buffer[newline_pos:] def readlines(self, sizehint=0): # Shamelessly stolen from StringIO total = 0 lines = [] line = self.readline(sizehint) while line: lines.append(line) total += len(line) if 0 < sizehint <= total: break line = self.readline(sizehint) return lines def read_trailer_lines(self): if not self.closed: raise ValueError( "Cannot read trailers until the request body has been read.") while True: line = self.rfile.readline() if not line: # No more data--illegal end of headers raise ValueError("Illegal end of headers.") self.bytes_read += len(line) if self.maxlen and self.bytes_read > self.maxlen: raise IOError("Request Entity Too Large") if line == CRLF: # Normal end of headers break if not line.endswith(CRLF): raise ValueError("HTTP requires CRLF terminators") yield line def close(self): self.rfile.close() def __iter__(self): line = self.readline() while line: yield line line = self.readline() class HTTPRequest(object): """An HTTP Request (and response). A single HTTP connection may consist of multiple request/response pairs. """ server = None """The HTTPServer object which is receiving this request.""" conn = None """The HTTPConnection object on which this request connected.""" inheaders = {} """A dict of request headers.""" outheaders = [] """A list of header tuples to write in the response.""" ready = False """When True, the request has been parsed and is ready to begin generating the response. When False, signals the calling Connection that the response should not be generated and the connection should close.""" close_connection = False """Signals the calling Connection that the request should close. This does not imply an error! 
The client and/or server may each request that the connection be closed.""" chunked_write = False """If True, output will be encoded with the "chunked" transfer-coding. This value is set automatically inside send_headers.""" def __init__(self, server, conn): self.server = server self.conn = conn self.ready = False self.started_request = False self.scheme = ntob("http") if self.server.ssl_adapter is not None: self.scheme = ntob("https") # Use the lowest-common protocol in case read_request_line errors. self.response_protocol = 'HTTP/1.0' self.inheaders = {} self.status = "" self.outheaders = [] self.sent_headers = False self.close_connection = self.__class__.close_connection self.chunked_read = False self.chunked_write = self.__class__.chunked_write def parse_request(self): """Parse the next HTTP request start-line and message-headers.""" self.rfile = SizeCheckWrapper(self.conn.rfile, self.server.max_request_header_size) try: success = self.read_request_line() except MaxSizeExceeded: self.simple_response("414 Request-URI Too Long", "The Request-URI sent with the request exceeds the maximum " "allowed bytes.") return else: if not success: return try: success = self.read_request_headers() except MaxSizeExceeded: self.simple_response("413 Request Entity Too Large", "The headers sent with the request exceed the maximum " "allowed bytes.") return else: if not success: return self.ready = True def read_request_line(self): # HTTP/1.1 connections are persistent by default. If a client # requests a page, then idles (leaves the connection open), # then rfile.readline() will raise socket.error("timed out"). # Note that it does this based on the value given to settimeout(), # and doesn't need the client to request or acknowledge the close # (although your TCP stack might suffer for it: cf Apache's history # with FIN_WAIT_2). request_line = self.rfile.readline() # Set started_request to True so communicate() knows to send 408 # from here on out. self.started_request = True if not request_line: return False if request_line == CRLF: # RFC 2616 sec 4.1: "...if the server is reading the protocol # stream at the beginning of a message and receives a CRLF # first, it should ignore the CRLF." # But only ignore one leading line! else we enable a DoS. request_line = self.rfile.readline() if not request_line: return False if not request_line.endswith(CRLF): self.simple_response("400 Bad Request", "HTTP requires CRLF terminators") return False try: method, uri, req_protocol = request_line.strip().split(SPACE, 2) # The [x:y] slicing is necessary for byte strings to avoid getting ord's rp = int(req_protocol[5:6]), int(req_protocol[7:8]) except ValueError: self.simple_response("400 Bad Request", "Malformed Request-Line") return False self.uri = uri self.method = method # uri may be an abs_path (including "http://host.domain.tld"); scheme, authority, path = self.parse_request_uri(uri) if NUMBER_SIGN in path: self.simple_response("400 Bad Request", "Illegal #fragment in Request-URI.") return False if scheme: self.scheme = scheme qs = EMPTY if QUESTION_MARK in path: path, qs = path.split(QUESTION_MARK, 1) # Unquote the path+params (e.g. "/this%20path" -> "/this path"). # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2 # # But note that "...a URI must be separated into its components # before the escaped characters within those components can be # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2 # Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path". 
try: atoms = [self.unquote_bytes(x) for x in quoted_slash.split(path)] except ValueError: ex = sys.exc_info()[1] self.simple_response("400 Bad Request", ex.args[0]) return False path = b"%2F".join(atoms) self.path = path # Note that, like wsgiref and most other HTTP servers, # we "% HEX HEX"-unquote the path but not the query string. self.qs = qs # Compare request and server HTTP protocol versions, in case our # server does not support the requested protocol. Limit our output # to min(req, server). We want the following output: # request server actual written supported response # protocol protocol response protocol feature set # a 1.0 1.0 1.0 1.0 # b 1.0 1.1 1.1 1.0 # c 1.1 1.0 1.0 1.0 # d 1.1 1.1 1.1 1.1 # Notice that, in (b), the response will be "HTTP/1.1" even though # the client only understands 1.0. RFC 2616 10.5.6 says we should # only return 505 if the _major_ version is different. # The [x:y] slicing is necessary for byte strings to avoid getting ord's sp = int(self.server.protocol[5:6]), int(self.server.protocol[7:8]) if sp[0] != rp[0]: self.simple_response("505 HTTP Version Not Supported") return False self.request_protocol = req_protocol self.response_protocol = "HTTP/%s.%s" % min(rp, sp) return True def read_request_headers(self): """Read self.rfile into self.inheaders. Return success.""" # then all the http headers try: read_headers(self.rfile, self.inheaders) except ValueError: ex = sys.exc_info()[1] self.simple_response("400 Bad Request", ex.args[0]) return False mrbs = self.server.max_request_body_size if mrbs and int(self.inheaders.get(b"Content-Length", 0)) > mrbs: self.simple_response("413 Request Entity Too Large", "The entity sent with the request exceeds the maximum " "allowed bytes.") return False # Persistent connection support if self.response_protocol == "HTTP/1.1": # Both server and client are HTTP/1.1 if self.inheaders.get(b"Connection", b"") == b"close": self.close_connection = True else: # Either the server or client (or both) are HTTP/1.0 if self.inheaders.get(b"Connection", b"") != b"Keep-Alive": self.close_connection = True # Transfer-Encoding support te = None if self.response_protocol == "HTTP/1.1": te = self.inheaders.get(b"Transfer-Encoding") if te: te = [x.strip().lower() for x in te.split(b",") if x.strip()] self.chunked_read = False if te: for enc in te: if enc == b"chunked": self.chunked_read = True else: # Note that, even if we see "chunked", we must reject # if there is an extension we don't recognize. self.simple_response("501 Unimplemented") self.close_connection = True return False # From PEP 333: # "Servers and gateways that implement HTTP 1.1 must provide # transparent support for HTTP 1.1's "expect/continue" mechanism. # This may be done in any of several ways: # 1. Respond to requests containing an Expect: 100-continue request # with an immediate "100 Continue" response, and proceed normally. # 2. Proceed with the request normally, but provide the application # with a wsgi.input stream that will send the "100 Continue" # response if/when the application first attempts to read from # the input stream. The read request must then remain blocked # until the client responds. # 3. Wait until the client decides that the server does not support # expect/continue, and sends the request body on its own. # (This is suboptimal, and is not recommended.) # # We used to do 3, but are now doing 1. Maybe we'll do 2 someday, # but it seems like it would be a big slowdown for such a rare case. 
if self.inheaders.get(b"Expect", b"") == b"100-continue": # Don't use simple_response here, because it emits headers # we don't want. See https://bitbucket.org/cherrypy/cherrypy/issue/951 msg = self.server.protocol.encode('ascii') + b" 100 Continue\r\n\r\n" try: self.conn.wfile.write(msg) except socket.error: x = sys.exc_info()[1] if x.args[0] not in socket_errors_to_ignore: raise return True def parse_request_uri(self, uri): """Parse a Request-URI into (scheme, authority, path). Note that Request-URI's must be one of:: Request-URI = "*" | absoluteURI | abs_path | authority Therefore, a Request-URI which starts with a double forward-slash cannot be a "net_path":: net_path = "//" authority [ abs_path ] Instead, it must be interpreted as an "abs_path" with an empty first path segment:: abs_path = "/" path_segments path_segments = segment *( "/" segment ) segment = *pchar *( ";" param ) param = *pchar """ if uri == ASTERISK: return None, None, uri scheme, sep, remainder = uri.partition(b'://') if sep and QUESTION_MARK not in scheme: # An absoluteURI. # If there's a scheme (and it must be http or https), then: # http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query ]] authority, path_a, path_b = remainder.partition(FORWARD_SLASH) return scheme.lower(), authority, path_a + path_b if uri.startswith(FORWARD_SLASH): # An abs_path. return None, None, uri else: # An authority. return None, uri, None def unquote_bytes(self, path): """takes quoted string and unquotes % encoded values""" res = path.split(b'%') for i in range(1, len(res)): item = res[i] try: res[i] = bytes([int(item[:2], 16)]) + item[2:] except ValueError: raise return b''.join(res) def respond(self): """Call the gateway and write its iterable output.""" mrbs = self.server.max_request_body_size if self.chunked_read: self.rfile = ChunkedRFile(self.conn.rfile, mrbs) else: cl = int(self.inheaders.get(b"Content-Length", 0)) if mrbs and mrbs < cl: if not self.sent_headers: self.simple_response("413 Request Entity Too Large", "The entity sent with the request exceeds the maximum " "allowed bytes.") return self.rfile = KnownLengthRFile(self.conn.rfile, cl) self.server.gateway(self).respond() if (self.ready and not self.sent_headers): self.sent_headers = True self.send_headers() if self.chunked_write: self.conn.wfile.write(b"0\r\n\r\n") def simple_response(self, status, msg=""): """Write a simple response back to the client.""" status = str(status) buf = [bytes(self.server.protocol, "ascii") + SPACE + bytes(status, "ISO-8859-1") + CRLF, bytes("Content-Length: %s\r\n" % len(msg), "ISO-8859-1"), b"Content-Type: text/plain\r\n"] if status[:3] in ("413", "414"): # Request Entity Too Large / Request-URI Too Long self.close_connection = True if self.response_protocol == 'HTTP/1.1': # This will not be true for 414, since read_request_line # usually raises 414 before reading the whole line, and we # therefore cannot know the proper response_protocol. buf.append(b"Connection: close\r\n") else: # HTTP/1.0 had no 413/414 status nor Connection header. # Emit 400 instead and trust the message body is enough. 
status = "400 Bad Request" buf.append(CRLF) if msg: if isinstance(msg, str): msg = msg.encode("ISO-8859-1") buf.append(msg) try: self.conn.wfile.write(b"".join(buf)) except socket.error: x = sys.exc_info()[1] if x.args[0] not in socket_errors_to_ignore: raise def write(self, chunk): """Write unbuffered data to the client.""" if self.chunked_write and chunk: buf = [bytes(hex(len(chunk)), 'ASCII')[2:], CRLF, chunk, CRLF] self.conn.wfile.write(EMPTY.join(buf)) else: self.conn.wfile.write(chunk) def send_headers(self): """Assert, process, and send the HTTP response message-headers. You must set self.status, and self.outheaders before calling this. """ hkeys = [key.lower() for key, value in self.outheaders] status = int(self.status[:3]) if status == 413: # Request Entity Too Large. Close conn to avoid garbage. self.close_connection = True elif b"content-length" not in hkeys: # "All 1xx (informational), 204 (no content), # and 304 (not modified) responses MUST NOT # include a message-body." So no point chunking. if status < 200 or status in (204, 205, 304): pass else: if (self.response_protocol == 'HTTP/1.1' and self.method != b'HEAD'): # Use the chunked transfer-coding self.chunked_write = True self.outheaders.append((b"Transfer-Encoding", b"chunked")) else: # Closing the conn is the only way to determine len. self.close_connection = True if b"connection" not in hkeys: if self.response_protocol == 'HTTP/1.1': # Both server and client are HTTP/1.1 or better if self.close_connection: self.outheaders.append((b"Connection", b"close")) else: # Server and/or client are HTTP/1.0 if not self.close_connection: self.outheaders.append((b"Connection", b"Keep-Alive")) if (not self.close_connection) and (not self.chunked_read): # Read any remaining request body data on the socket. # "If an origin server receives a request that does not include an # Expect request-header field with the "100-continue" expectation, # the request includes a request body, and the server responds # with a final status code before reading the entire request body # from the transport connection, then the server SHOULD NOT close # the transport connection until it has read the entire request, # or until the client closes the connection. Otherwise, the client # might not reliably receive the response message. However, this # requirement is not be construed as preventing a server from # defending itself against denial-of-service attacks, or from # badly broken client implementations." 
remaining = getattr(self.rfile, 'remaining', 0) if remaining > 0: self.rfile.read(remaining) if b"date" not in hkeys: self.outheaders.append( (b"Date", email.utils.formatdate(usegmt=True).encode('ISO-8859-1'))) if b"server" not in hkeys: self.outheaders.append( (b"Server", self.server.server_name.encode('ISO-8859-1'))) buf = [self.server.protocol.encode('ascii') + SPACE + self.status + CRLF] for k, v in self.outheaders: buf.append(k + COLON + SPACE + v + CRLF) buf.append(CRLF) self.conn.wfile.write(EMPTY.join(buf)) class NoSSLError(Exception): """Exception raised when a client speaks HTTP to an HTTPS socket.""" pass class FatalSSLAlert(Exception): """Exception raised when the SSL implementation signals a fatal alert.""" pass class CP_BufferedWriter(io.BufferedWriter): """Faux file object attached to a socket object.""" def write(self, b): self._checkClosed() if isinstance(b, str): raise TypeError("can't write str to binary stream") with self._write_lock: self._write_buf.extend(b) self._flush_unlocked() return len(b) def _flush_unlocked(self): self._checkClosed("flush of closed file") while self._write_buf: try: # ssl sockets only except 'bytes', not bytearrays # so perhaps we should conditionally wrap this for perf? n = self.raw.write(bytes(self._write_buf)) except io.BlockingIOError as e: n = e.characters_written del self._write_buf[:n] def CP_makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE): if 'r' in mode: return io.BufferedReader(socket.SocketIO(sock, mode), bufsize) else: return CP_BufferedWriter(socket.SocketIO(sock, mode), bufsize) class HTTPConnection(object): """An HTTP connection (active socket). server: the Server object which received this connection. socket: the raw socket object (usually TCP) for this connection. makefile: a fileobject class for reading from the socket. """ remote_addr = None remote_port = None ssl_env = None rbufsize = DEFAULT_BUFFER_SIZE wbufsize = DEFAULT_BUFFER_SIZE RequestHandlerClass = HTTPRequest def __init__(self, server, sock, makefile=CP_makefile): self.server = server self.socket = sock self.rfile = makefile(sock, "rb", self.rbufsize) self.wfile = makefile(sock, "wb", self.wbufsize) self.requests_seen = 0 def communicate(self): """Read each request and respond appropriately.""" request_seen = False try: while True: # (re)set req to None so that if something goes wrong in # the RequestHandlerClass constructor, the error doesn't # get written to the previous request. req = None req = self.RequestHandlerClass(self.server, self) # This order of operations should guarantee correct pipelining. req.parse_request() if self.server.stats['Enabled']: self.requests_seen += 1 if not req.ready: # Something went wrong in the parsing (and the server has # probably already made a simple_response). Return and # let the conn close. return request_seen = True req.respond() if req.close_connection: return except socket.error: e = sys.exc_info()[1] errnum = e.args[0] # sadly SSL sockets return a different (longer) time out string if errnum == 'timed out' or errnum == 'The read operation timed out': # Don't error if we're between requests; only error # if 1) no request has been started at all, or 2) we're # in the middle of a request. # See https://bitbucket.org/cherrypy/cherrypy/issue/853 if (not request_seen) or (req and req.started_request): # Don't bother writing the 408 if the response # has already started being written. if req and not req.sent_headers: try: req.simple_response("408 Request Timeout") except FatalSSLAlert: # Close the connection. 
return elif errnum not in socket_errors_to_ignore: self.server.error_log("socket.error %s" % repr(errnum), level=logging.WARNING, traceback=True) if req and not req.sent_headers: try: req.simple_response("500 Internal Server Error") except FatalSSLAlert: # Close the connection. return return except (KeyboardInterrupt, SystemExit): raise except FatalSSLAlert: # Close the connection. return except NoSSLError: if req and not req.sent_headers: # Unwrap our wfile self.wfile = CP_makefile(self.socket._sock, "wb", self.wbufsize) req.simple_response("400 Bad Request", "The client sent a plain HTTP request, but " "this server only speaks HTTPS on this port.") self.linger = True except Exception: e = sys.exc_info()[1] self.server.error_log(repr(e), level=logging.ERROR, traceback=True) if req and not req.sent_headers: try: req.simple_response("500 Internal Server Error") except FatalSSLAlert: # Close the connection. return linger = False def close(self): """Close the socket underlying this connection.""" self.rfile.close() if not self.linger: # Python's socket module does NOT call close on the kernel socket # when you call socket.close(). We do so manually here because we # want this server to send a FIN TCP segment immediately. Note this # must be called *before* calling socket.close(), because the latter # drops its reference to the kernel socket. # Python 3 *probably* fixed this with socket._real_close; hard to tell. # self.socket._sock.close() self.socket.close() else: # On the other hand, sometimes we want to hang around for a bit # to make sure the client has a chance to read our entire # response. Skipping the close() calls here delays the FIN # packet until the socket object is garbage-collected later. # Someday, perhaps, we'll do the full lingering_close that # Apache does, but not today. pass class TrueyZero(object): """An object which equals and does math like the integer '0' but evals True.""" def __add__(self, other): return other def __radd__(self, other): return other trueyzero = TrueyZero() _SHUTDOWNREQUEST = None class WorkerThread(threading.Thread): """Thread which continuously polls a Queue for Connection objects. Due to the timing issues of polling a Queue, a WorkerThread does not check its own 'ready' flag after it has started. To stop the thread, it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue (one for each running WorkerThread). 
""" conn = None """The current connection pulled off the Queue, or None.""" server = None """The HTTP Server which spawned this thread, and which owns the Queue and is placing active connections into it.""" ready = False """A simple flag for the calling server to know when this thread has begun polling the Queue.""" def __init__(self, server): self.ready = False self.server = server self.requests_seen = 0 self.bytes_read = 0 self.bytes_written = 0 self.start_time = None self.work_time = 0 self.stats = { 'Requests': lambda s: self.requests_seen + ((self.start_time is None) and trueyzero or self.conn.requests_seen), 'Bytes Read': lambda s: self.bytes_read + ((self.start_time is None) and trueyzero or self.conn.rfile.bytes_read), 'Bytes Written': lambda s: self.bytes_written + ((self.start_time is None) and trueyzero or self.conn.wfile.bytes_written), 'Work Time': lambda s: self.work_time + ((self.start_time is None) and trueyzero or time.time() - self.start_time), 'Read Throughput': lambda s: s['Bytes Read'](s) / (s['Work Time'](s) or 1e-6), 'Write Throughput': lambda s: s['Bytes Written'](s) / (s['Work Time'](s) or 1e-6), } threading.Thread.__init__(self) def run(self): self.server.stats['Worker Threads'][self.getName()] = self.stats try: self.ready = True while True: conn = self.server.requests.get() if conn is _SHUTDOWNREQUEST: return self.conn = conn if self.server.stats['Enabled']: self.start_time = time.time() try: conn.communicate() finally: conn.close() if self.server.stats['Enabled']: self.requests_seen += self.conn.requests_seen self.bytes_read += self.conn.rfile.bytes_read self.bytes_written += self.conn.wfile.bytes_written self.work_time += time.time() - self.start_time self.start_time = None self.conn = None except (KeyboardInterrupt, SystemExit): exc = sys.exc_info()[1] self.server.interrupt = exc class ThreadPool(object): """A Request Queue for an HTTPServer which pools threads. ThreadPool objects must provide min, get(), put(obj), start() and stop(timeout) attributes. """ def __init__(self, server, min=10, max=-1): self.server = server self.min = min self.max = max self._threads = [] self._queue = queue.Queue() self.get = self._queue.get def start(self): """Start the pool of threads.""" for i in range(self.min): self._threads.append(WorkerThread(self.server)) for worker in self._threads: worker.setName("CP Server " + worker.getName()) worker.start() for worker in self._threads: while not worker.ready: time.sleep(.1) def _get_idle(self): """Number of worker threads which are idle. Read-only.""" return len([t for t in self._threads if t.conn is None]) idle = property(_get_idle, doc=_get_idle.__doc__) def put(self, obj): self._queue.put(obj) if obj is _SHUTDOWNREQUEST: return def grow(self, amount): """Spawn new worker threads (not above self.max).""" if self.max > 0: budget = max(self.max - len(self._threads), 0) else: # self.max <= 0 indicates no maximum budget = float('inf') n_new = min(amount, budget) workers = [self._spawn_worker() for i in range(n_new)] while not all(worker.ready for worker in workers): time.sleep(.1) self._threads.extend(workers) def _spawn_worker(self): worker = WorkerThread(self.server) worker.setName("CP Server " + worker.getName()) worker.start() return worker def shrink(self, amount): """Kill off worker threads (not below self.min).""" # Grow/shrink the pool if necessary. 
# Remove any dead threads from our list for t in self._threads: if not t.isAlive(): self._threads.remove(t) amount -= 1 # calculate the number of threads above the minimum n_extra = max(len(self._threads) - self.min, 0) # don't remove more than amount n_to_remove = min(amount, n_extra) # put shutdown requests on the queue equal to the number of threads # to remove. As each request is processed by a worker, that worker # will terminate and be culled from the list. for n in range(n_to_remove): self._queue.put(_SHUTDOWNREQUEST) def stop(self, timeout=5): # Must shut down threads here so the code that calls # this method can know when all threads are stopped. for worker in self._threads: self._queue.put(_SHUTDOWNREQUEST) # Don't join currentThread (when stop is called inside a request). current = threading.currentThread() if timeout and timeout >= 0: endtime = time.time() + timeout while self._threads: worker = self._threads.pop() if worker is not current and worker.isAlive(): try: if timeout is None or timeout < 0: worker.join() else: remaining_time = endtime - time.time() if remaining_time > 0: worker.join(remaining_time) if worker.isAlive(): # We exhausted the timeout. # Forcibly shut down the socket. c = worker.conn if c and not c.rfile.closed: try: c.socket.shutdown(socket.SHUT_RD) except TypeError: # pyOpenSSL sockets don't take an arg c.socket.shutdown() worker.join() except (AssertionError, # Ignore repeated Ctrl-C. # See https://bitbucket.org/cherrypy/cherrypy/issue/691. KeyboardInterrupt): pass def _get_qsize(self): return self._queue.qsize() qsize = property(_get_qsize) try: import fcntl except ImportError: try: from ctypes import windll, WinError import ctypes.wintypes _SetHandleInformation = windll.kernel32.SetHandleInformation _SetHandleInformation.argtypes = [ ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD, ] _SetHandleInformation.restype = ctypes.wintypes.BOOL except ImportError: def prevent_socket_inheritance(sock): """Dummy function, since neither fcntl nor ctypes are available.""" pass else: def prevent_socket_inheritance(sock): """Mark the given socket fd as non-inheritable (Windows).""" if not _SetHandleInformation(sock.fileno(), 1, 0): raise WinError() else: def prevent_socket_inheritance(sock): """Mark the given socket fd as non-inheritable (POSIX).""" fd = sock.fileno() old_flags = fcntl.fcntl(fd, fcntl.F_GETFD) fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC) class SSLAdapter(object): """Base class for SSL driver library adapters. Required methods: * ``wrap(sock) -> (wrapped socket, ssl environ dict)`` * ``makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE) -> socket file object`` """ def __init__(self, certificate, private_key, certificate_chain=None): self.certificate = certificate self.private_key = private_key self.certificate_chain = certificate_chain def wrap(self, sock): raise NotImplemented def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE): raise NotImplemented class HTTPServer(object): """An HTTP server.""" _bind_addr = "127.0.0.1" _interrupt = None gateway = None """A Gateway instance.""" minthreads = None """The minimum number of worker threads to create (default 10).""" maxthreads = None """The maximum number of worker threads to create (default -1 = no limit).""" server_name = None """The name of the server; defaults to socket.gethostname().""" protocol = "HTTP/1.1" """The version string to write in the Status-Line of all HTTP responses. For example, "HTTP/1.1" is the default. 
This also limits the supported features used in the response.""" request_queue_size = 5 """The 'backlog' arg to socket.listen(); max queued connections (default 5).""" shutdown_timeout = 5 """The total time, in seconds, to wait for worker threads to cleanly exit.""" timeout = 10 """The timeout in seconds for accepted connections (default 10).""" version = "CherryPy/3.2.4" """A version string for the HTTPServer.""" software = None """The value to set for the SERVER_SOFTWARE entry in the WSGI environ. If None, this defaults to ``'%s Server' % self.version``.""" ready = False """An internal flag which marks whether the socket is accepting connections.""" max_request_header_size = 0 """The maximum size, in bytes, for request headers, or 0 for no limit.""" max_request_body_size = 0 """The maximum size, in bytes, for request bodies, or 0 for no limit.""" nodelay = True """If True (the default since 3.1), sets the TCP_NODELAY socket option.""" ConnectionClass = HTTPConnection """The class to use for handling HTTP connections.""" ssl_adapter = None """An instance of SSLAdapter (or a subclass). You must have the corresponding SSL driver library installed.""" def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1, server_name=None): self.bind_addr = bind_addr self.gateway = gateway self.requests = ThreadPool(self, min=minthreads or 1, max=maxthreads) if not server_name: server_name = socket.gethostname() self.server_name = server_name self.clear_stats() def clear_stats(self): self._start_time = None self._run_time = 0 self.stats = { 'Enabled': False, 'Bind Address': lambda s: repr(self.bind_addr), 'Run time': lambda s: (not s['Enabled']) and -1 or self.runtime(), 'Accepts': 0, 'Accepts/sec': lambda s: s['Accepts'] / self.runtime(), 'Queue': lambda s: getattr(self.requests, "qsize", None), 'Threads': lambda s: len(getattr(self.requests, "_threads", [])), 'Threads Idle': lambda s: getattr(self.requests, "idle", None), 'Socket Errors': 0, 'Requests': lambda s: (not s['Enabled']) and -1 or sum([w['Requests'](w) for w in s['Worker Threads'].values()], 0), 'Bytes Read': lambda s: (not s['Enabled']) and -1 or sum([w['Bytes Read'](w) for w in s['Worker Threads'].values()], 0), 'Bytes Written': lambda s: (not s['Enabled']) and -1 or sum([w['Bytes Written'](w) for w in s['Worker Threads'].values()], 0), 'Work Time': lambda s: (not s['Enabled']) and -1 or sum([w['Work Time'](w) for w in s['Worker Threads'].values()], 0), 'Read Throughput': lambda s: (not s['Enabled']) and -1 or sum( [w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6) for w in s['Worker Threads'].values()], 0), 'Write Throughput': lambda s: (not s['Enabled']) and -1 or sum( [w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6) for w in s['Worker Threads'].values()], 0), 'Worker Threads': {}, } logging.statistics["CherryPy HTTPServer %d" % id(self)] = self.stats def runtime(self): if self._start_time is None: return self._run_time else: return self._run_time + (time.time() - self._start_time) def __str__(self): return "%s.%s(%r)" % (self.__module__, self.__class__.__name__, self.bind_addr) def _get_bind_addr(self): return self._bind_addr def _set_bind_addr(self, value): if isinstance(value, tuple) and value[0] in ('', None): # Despite the socket module docs, using '' does not # allow AI_PASSIVE to work. Passing None instead # returns '0.0.0.0' like we want. 
In other words: # host AI_PASSIVE result # '' Y 192.168.x.y # '' N 192.168.x.y # None Y 0.0.0.0 # None N 127.0.0.1 # But since you can get the same effect with an explicit # '0.0.0.0', we deny both the empty string and None as values. raise ValueError("Host values of '' or None are not allowed. " "Use '0.0.0.0' (IPv4) or '::' (IPv6) instead " "to listen on all active interfaces.") self._bind_addr = value bind_addr = property(_get_bind_addr, _set_bind_addr, doc="""The interface on which to listen for connections. For TCP sockets, a (host, port) tuple. Host values may be any IPv4 or IPv6 address, or any valid hostname. The string 'localhost' is a synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6). The string '0.0.0.0' is a special IPv4 entry meaning "any active interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for IPv6. The empty string or None are not allowed. For UNIX sockets, supply the filename as a string.""") def start(self): """Run the server forever.""" # We don't have to trap KeyboardInterrupt or SystemExit here, # because cherrpy.server already does so, calling self.stop() for us. # If you're using this server with another framework, you should # trap those exceptions in whatever code block calls start(). self._interrupt = None if self.software is None: self.software = "%s Server" % self.version # Select the appropriate socket if isinstance(self.bind_addr, basestring): # AF_UNIX socket # So we can reuse the socket... try: os.unlink(self.bind_addr) except: pass # So everyone can access the socket... try: os.chmod(self.bind_addr, 511) # 0777 except: pass info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)] else: # AF_INET or AF_INET6 socket # Get the correct address family for our host (allows IPv6 addresses) host, port = self.bind_addr try: info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE) except socket.gaierror: if ':' in self.bind_addr[0]: info = [(socket.AF_INET6, socket.SOCK_STREAM, 0, "", self.bind_addr + (0, 0))] else: info = [(socket.AF_INET, socket.SOCK_STREAM, 0, "", self.bind_addr)] self.socket = None msg = "No socket could be created" for res in info: af, socktype, proto, canonname, sa = res try: self.bind(af, socktype, proto) except socket.error: if self.socket: self.socket.close() self.socket = None continue break if not self.socket: raise socket.error(msg) # Timeout so KeyboardInterrupt can be caught on Win32 self.socket.settimeout(1) self.socket.listen(self.request_queue_size) # Create worker threads self.requests.start() self.ready = True self._start_time = time.time() while self.ready: try: self.tick() except (KeyboardInterrupt, SystemExit): raise except: self.error_log("Error in HTTPServer.tick", level=logging.ERROR, traceback=True) if self.interrupt: while self.interrupt is True: # Wait for self.stop() to complete. See _set_interrupt. 
time.sleep(0.1) if self.interrupt: raise self.interrupt def error_log(self, msg="", level=20, traceback=False): # Override this in subclasses as desired sys.stderr.write(msg + '\n') sys.stderr.flush() if traceback: tblines = format_exc() sys.stderr.write(tblines) sys.stderr.flush() def bind(self, family, type, proto=0): """Create (or recreate) the actual socket object.""" self.socket = socket.socket(family, type, proto) prevent_socket_inheritance(self.socket) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if self.nodelay and not isinstance(self.bind_addr, str): self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if self.ssl_adapter is not None: self.socket = self.ssl_adapter.bind(self.socket) # If listening on the IPV6 any address ('::' = IN6ADDR_ANY), # activate dual-stack. See https://bitbucket.org/cherrypy/cherrypy/issue/871. if (hasattr(socket, 'AF_INET6') and family == socket.AF_INET6 and self.bind_addr[0] in ('::', '::0', '::0.0.0.0')): try: self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0) except (AttributeError, socket.error): # Apparently, the socket option is not available in # this machine's TCP stack pass self.socket.bind(self.bind_addr) def tick(self): """Accept a new connection and put it on the Queue.""" try: s, addr = self.socket.accept() if self.stats['Enabled']: self.stats['Accepts'] += 1 if not self.ready: return prevent_socket_inheritance(s) if hasattr(s, 'settimeout'): s.settimeout(self.timeout) makefile = CP_makefile ssl_env = {} # if ssl cert and key are set, we try to be a secure HTTP server if self.ssl_adapter is not None: try: s, ssl_env = self.ssl_adapter.wrap(s) except NoSSLError: msg = ("The client sent a plain HTTP request, but " "this server only speaks HTTPS on this port.") buf = ["%s 400 Bad Request\r\n" % self.protocol, "Content-Length: %s\r\n" % len(msg), "Content-Type: text/plain\r\n\r\n", msg] wfile = makefile(s, "wb", DEFAULT_BUFFER_SIZE) try: wfile.write("".join(buf).encode('ISO-8859-1')) except socket.error: x = sys.exc_info()[1] if x.args[0] not in socket_errors_to_ignore: raise return if not s: return makefile = self.ssl_adapter.makefile # Re-apply our timeout since we may have a new socket object if hasattr(s, 'settimeout'): s.settimeout(self.timeout) conn = self.ConnectionClass(self, s, makefile) if not isinstance(self.bind_addr, basestring): # optional values # Until we do DNS lookups, omit REMOTE_HOST if addr is None: # sometimes this can happen # figure out if AF_INET or AF_INET6. if len(s.getsockname()) == 2: # AF_INET addr = ('0.0.0.0', 0) else: # AF_INET6 addr = ('::', 0) conn.remote_addr = addr[0] conn.remote_port = addr[1] conn.ssl_env = ssl_env self.requests.put(conn) except socket.timeout: # The only reason for the timeout in start() is so we can # notice keyboard interrupts on Win32, which don't interrupt # accept() by default return except socket.error: x = sys.exc_info()[1] if self.stats['Enabled']: self.stats['Socket Errors'] += 1 if x.args[0] in socket_error_eintr: # I *think* this is right. EINTR should occur when a signal # is received during the accept() call; all docs say retry # the call, and I *think* I'm reading it right that Python # will then go ahead and poll for and handle the signal # elsewhere. See https://bitbucket.org/cherrypy/cherrypy/issue/707. return if x.args[0] in socket_errors_nonblocking: # Just try again. See https://bitbucket.org/cherrypy/cherrypy/issue/479. return if x.args[0] in socket_errors_to_ignore: # Our socket was closed. 
# See https://bitbucket.org/cherrypy/cherrypy/issue/686. return raise def _get_interrupt(self): return self._interrupt def _set_interrupt(self, interrupt): self._interrupt = True self.stop() self._interrupt = interrupt interrupt = property(_get_interrupt, _set_interrupt, doc="Set this to an Exception instance to " "interrupt the server.") def stop(self): """Gracefully shutdown a server that is serving forever.""" self.ready = False if self._start_time is not None: self._run_time += (time.time() - self._start_time) self._start_time = None sock = getattr(self, "socket", None) if sock: if not isinstance(self.bind_addr, basestring): # Touch our own socket to make accept() return immediately. try: host, port = sock.getsockname()[:2] except socket.error: x = sys.exc_info()[1] if x.args[0] not in socket_errors_to_ignore: # Changed to use error code and not message # See https://bitbucket.org/cherrypy/cherrypy/issue/860. raise else: # Note that we're explicitly NOT using AI_PASSIVE, # here, because we want an actual IP to touch. # localhost won't work if we've bound to a public IP, # but it will if we bound to '0.0.0.0' (INADDR_ANY). for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res s = None try: s = socket.socket(af, socktype, proto) # See http://groups.google.com/group/cherrypy-users/ # browse_frm/thread/bbfe5eb39c904fe0 s.settimeout(1.0) s.connect((host, port)) s.close() except socket.error: if s: s.close() if hasattr(sock, "close"): sock.close() self.socket = None self.requests.stop(self.shutdown_timeout) class Gateway(object): """A base class to interface HTTPServer with other systems, such as WSGI.""" def __init__(self, req): self.req = req def respond(self): """Process the current request. Must be overridden in a subclass.""" raise NotImplemented # These may either be wsgiserver.SSLAdapter subclasses or the string names # of such classes (in which case they will be lazily loaded). ssl_adapters = { 'builtin': 'django_wsgiserver.wsgiserver.ssl_builtin.BuiltinSSLAdapter', } def get_ssl_adapter_class(name='builtin'): """Return an SSL adapter class for the given name.""" adapter = ssl_adapters[name.lower()] if isinstance(adapter, basestring): last_dot = adapter.rfind(".") attr_name = adapter[last_dot + 1:] mod_path = adapter[:last_dot] try: mod = sys.modules[mod_path] if mod is None: raise KeyError() except KeyError: # The last [''] is important. mod = __import__(mod_path, globals(), locals(), ['']) # Let an AttributeError propagate outward. 
try: adapter = getattr(mod, attr_name) except AttributeError: raise AttributeError("'%s' object has no attribute '%s'" % (mod_path, attr_name)) return adapter # -------------------------------- WSGI Stuff -------------------------------- # class CherryPyWSGIServer(HTTPServer): """A subclass of HTTPServer which calls a WSGI application.""" wsgi_version = (1, 0) """The version of WSGI to produce.""" def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None, max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5): self.requests = ThreadPool(self, min=numthreads or 1, max=max) self.wsgi_app = wsgi_app self.gateway = wsgi_gateways[self.wsgi_version] self.bind_addr = bind_addr if not server_name: server_name = socket.gethostname() self.server_name = server_name self.request_queue_size = request_queue_size self.timeout = timeout self.shutdown_timeout = shutdown_timeout self.clear_stats() def _get_numthreads(self): return self.requests.min def _set_numthreads(self, value): self.requests.min = value numthreads = property(_get_numthreads, _set_numthreads) class WSGIGateway(Gateway): """A base class to interface HTTPServer with WSGI.""" def __init__(self, req): self.req = req self.started_response = False self.env = self.get_environ() self.remaining_bytes_out = None def get_environ(self): """Return a new environ dict targeting the given wsgi.version""" raise NotImplemented def respond(self): """Process the current request.""" response = self.req.server.wsgi_app(self.env, self.start_response) try: for chunk in response: # "The start_response callable must not actually transmit # the response headers. Instead, it must store them for the # server or gateway to transmit only after the first # iteration of the application return value that yields # a NON-EMPTY string, or upon the application's first # invocation of the write() callable." (PEP 333) if chunk: if isinstance(chunk, unicodestr): chunk = chunk.encode('ISO-8859-1') self.write(chunk) finally: if hasattr(response, "close"): response.close() def start_response(self, status, headers, exc_info=None): """WSGI callable to begin the HTTP response.""" # "The application may call start_response more than once, # if and only if the exc_info argument is provided." if self.started_response and not exc_info: raise AssertionError("WSGI start_response called a second " "time with no exc_info.") self.started_response = True # "if exc_info is provided, and the HTTP headers have already been # sent, start_response must raise an error, and should raise the # exc_info tuple." if self.req.sent_headers: try: raise exc_info[0](exc_info[1]).with_traceback(exc_info[2]) finally: exc_info = None # According to PEP 3333, when using Python 3, the response status # and headers must be bytes masquerading as unicode; that is, they # must be of type "str" but are restricted to code points in the # "latin-1" set. if not isinstance(status, str): raise TypeError("WSGI response status is not of type str.") self.req.status = status.encode('ISO-8859-1') for k, v in headers: if not isinstance(k, str): raise TypeError("WSGI response header key %r is not of type str." % k) if not isinstance(v, str): raise TypeError("WSGI response header value %r is not of type str." % v) if k.lower() == 'content-length': self.remaining_bytes_out = int(v) self.req.outheaders.append((k.encode('ISO-8859-1'), v.encode('ISO-8859-1'))) return self.write def write(self, chunk): """WSGI callable to write unbuffered data to the client. 
This method is also used internally by start_response (to write data from the iterable returned by the WSGI application). """ if not self.started_response: raise AssertionError("WSGI write called before start_response.") chunklen = len(chunk) rbo = self.remaining_bytes_out if rbo is not None and chunklen > rbo: if not self.req.sent_headers: # Whew. We can send a 500 to the client. self.req.simple_response("500 Internal Server Error", "The requested resource returned more bytes than the " "declared Content-Length.") else: # Dang. We have probably already sent data. Truncate the chunk # to fit (so the client doesn't hang) and raise an error later. chunk = chunk[:rbo] if not self.req.sent_headers: self.req.sent_headers = True self.req.send_headers() self.req.write(chunk) if rbo is not None: rbo -= chunklen if rbo < 0: raise ValueError( "Response body exceeds the declared Content-Length.") class WSGIGateway_10(WSGIGateway): """A Gateway class to interface HTTPServer with WSGI 1.0.x.""" def get_environ(self): """Return a new environ dict targeting the given wsgi.version""" req = self.req env = { # set a non-standard environ entry so the WSGI app can know what # the *real* server protocol is (and what features to support). # See http://www.faqs.org/rfcs/rfc2145.html. 'ACTUAL_SERVER_PROTOCOL': req.server.protocol, 'PATH_INFO': req.path.decode('ISO-8859-1'), 'QUERY_STRING': req.qs.decode('ISO-8859-1'), 'REMOTE_ADDR': req.conn.remote_addr or '', 'REMOTE_PORT': str(req.conn.remote_port or ''), 'REQUEST_METHOD': req.method.decode('ISO-8859-1'), 'REQUEST_URI': req.uri, 'SCRIPT_NAME': '', 'SERVER_NAME': req.server.server_name, # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol. 'SERVER_PROTOCOL': req.request_protocol.decode('ISO-8859-1'), 'SERVER_SOFTWARE': req.server.software, 'wsgi.errors': sys.stderr, 'wsgi.input': req.rfile, 'wsgi.multiprocess': False, 'wsgi.multithread': True, 'wsgi.run_once': False, 'wsgi.url_scheme': req.scheme.decode('ISO-8859-1'), 'wsgi.version': (1, 0), } if isinstance(req.server.bind_addr, basestring): # AF_UNIX. This isn't really allowed by WSGI, which doesn't # address unix domain sockets. But it's better than nothing. env["SERVER_PORT"] = "" else: env["SERVER_PORT"] = str(req.server.bind_addr[1]) # Request headers for k, v in req.inheaders.items(): k = k.decode('ISO-8859-1').upper().replace("-", "_") env["HTTP_" + k] = v.decode('ISO-8859-1') # CONTENT_TYPE/CONTENT_LENGTH ct = env.pop("HTTP_CONTENT_TYPE", None) if ct is not None: env["CONTENT_TYPE"] = ct cl = env.pop("HTTP_CONTENT_LENGTH", None) if cl is not None: env["CONTENT_LENGTH"] = cl if req.conn.ssl_env: env.update(req.conn.ssl_env) return env class WSGIGateway_u0(WSGIGateway_10): """A Gateway class to interface HTTPServer with WSGI u.0. WSGI u.0 is an experimental protocol, which uses unicode for keys and values in both Python 2 and Python 3. """ def get_environ(self): """Return a new environ dict targeting the given wsgi.version""" req = self.req env_10 = WSGIGateway_10.get_environ(self) env = env_10.copy() env['wsgi.version'] = ('u', 0) # Request-URI env.setdefault('wsgi.url_encoding', 'utf-8') try: # SCRIPT_NAME is the empty string, who cares what encoding it is? env["PATH_INFO"] = req.path.decode(env['wsgi.url_encoding']) env["QUERY_STRING"] = req.qs.decode(env['wsgi.url_encoding']) except UnicodeDecodeError: # Fall back to latin 1 so apps can transcode if needed. 
env['wsgi.url_encoding'] = 'ISO-8859-1' env["PATH_INFO"] = env_10["PATH_INFO"] env["QUERY_STRING"] = env_10["QUERY_STRING"] return env wsgi_gateways = { (1, 0): WSGIGateway_10, ('u', 0): WSGIGateway_u0, } class WSGIPathInfoDispatcher(object): """A WSGI dispatcher for dispatch based on the PATH_INFO. apps: a dict or list of (path_prefix, app) pairs. """ def __init__(self, apps): try: apps = list(apps.items()) except AttributeError: pass # Sort the apps by len(path), descending apps.sort() apps.reverse() # The path_prefix strings must start, but not end, with a slash. # Use "" instead of "/". self.apps = [(p.rstrip("/"), a) for p, a in apps] def __call__(self, environ, start_response): path = environ["PATH_INFO"] or "/" for p, app in self.apps: # The apps list should be sorted by length, descending. if path.startswith(p + "/") or path == p: environ = environ.copy() environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p environ["PATH_INFO"] = path[len(p):] return app(environ, start_response) start_response('404 Not Found', [('Content-Type', 'text/plain'), ('Content-Length', '0')]) return ['']
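# --- Illustrative usage sketch (editor's addition; not part of the upstream module) ---
# The module docstring at the top describes the intended call flow. The snippet below is
# a minimal, hedged example of wiring a WSGI callable into CherryPyWSGIServer and shutting
# it down cleanly. The application body, host, and port are assumptions chosen purely for
# illustration.

def _example_app(environ, start_response):
    # A trivial WSGI callable: always answer 200 with a short plain-text body.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'Hello from CherryPyWSGIServer\n']

def _run_example_server():
    # Bind to localhost:8070 with a small worker pool; start() blocks until stop().
    server = CherryPyWSGIServer(('127.0.0.1', 8070), _example_app,
                                numthreads=4, server_name='example.local')
    try:
        server.start()
    except KeyboardInterrupt:
        # stop() clears `ready`, wakes accept(), and drains the worker ThreadPool,
        # which is the pattern the start() docstring recommends for standalone use.
        server.stop()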
bsd-3-clause
-1,554,800,382,253,655,600
36.206402
143
0.550114
false
InUrSys/PescArt2.0
src/Fichas_src/dialog_AmostEspecificaEspe.py
1
2223
'''
Created on 05/09/2017

@author: chernomirdinmacuvele
'''
from ui_AmostEspeEspecie import Ui_frmAmostEspeEspecieAmost
import GenericAmostrasQDialog


class dialog_AmostEspecificaEspe(GenericAmostrasQDialog.CustomForm_Amostras, Ui_frmAmostEspeEspecieAmost):
    """Dialog for the species-specific sampling form."""

    def __init__(self, parent=None, TblName=None, dbcon=None, Id=None, lstValToEdit=None, dictRules=None):
        super(dialog_AmostEspecificaEspe, self).__init__(parent)
        self.setupUi(self)

        # Context handed in by the caller: parent sample id, target table, values and rules for editing.
        self.N_Sequencial_Parent = str(Id)
        self.tblName = TblName
        self.lstToEdit = lstValToEdit
        self.dictRules = dictRules

        self.setDict()
        self.bOK = (False, None)
        self.lastChecked = None

        # Generic form plumbing provided by the CustomForm_Amostras base class.
        self.configWidgetTriggeredEvent()
        self.configRules()
        self.configKeepTrack(id_parente=Id)
        self.setValuesToEdit()
        self.configCombox()

        self.PBSalvar.clicked.connect(self.operacao)
        self.PBCancelar.clicked.connect(self.close)

    def setDict(self):
        # Parallel lists: entry i of every list describes the same database field
        # (column name, widget object name, widget reference and per-field flags).
        self.dictFields = {
            'fldName': ["id", "id_comp_especifica", "id_metodo_select", "peso", "n_indiv", "comentario"],
            'objName': ['id', 'id_comp_especifica', "CBMetSelecao", 'DSBPeso', 'SBN_indiv_Amostrados', 'LEComentarios'],
            'fldWidget': [None, None, self.CBMetSelecao, self.DSBPeso, self.SBN_indiv_Amostrados, self.LEComentarios],
            'isRel': [False, False, True, False, False, False],
            'toDefault': [False, False, False, False, False, True],
            'toCheck': [False, False, True, True, True, True],
            "toQuote": [False, False, True, True, False, True]
        }
        # Query/widget pair used to populate the selection-method combo box.
        self.dictCB = {
            'quer': ["select null as id, '-Metodo de Selecao-' as nome union all select id, nome from ref_table where id_grupo = 'MTS' and activo =true;"],
            'widget': [self.CBMetSelecao]
        }
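# --- Editor's illustrative note (assumption-labelled; not part of the original form) ---
# `dictFields` is organised as parallel lists: position i in every list describes the same
# field. The generic helpers inherited from CustomForm_Amostras presumably consume this
# structure; the sketch below only shows how such parallel lists can be read row-wise and
# is not taken from the project itself.

def _iter_field_rows(dict_fields):
    """Yield one dict per field, pairing each key of `dict_fields` with that field's entry."""
    keys = list(dict_fields.keys())
    for row in zip(*(dict_fields[k] for k in keys)):
        yield dict(zip(keys, row))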
gpl-3.0
-8,490,004,312,691,807,000
37.344828
166
0.536662
false
AntoineAugusti/katas
codingame/medium/heat_detector.py
1
1370
def packBounds(xMin, xMax, yMin, yMax): return [[xMin, xMax], [yMin, yMax]] def unpackBounds(bounds): xMin, xMax = bounds[0] yMin, yMax = bounds[1] return [xMin, xMax, yMin, yMax] def nextMove(width, height, x, y, direction, bounds): xMin, xMax, yMin, yMax = unpackBounds(bounds) if direction == "U": yMax = y elif direction == "UR": xMin = x yMax = y elif direction == "R": xMin = x elif direction == "DR": xMin = x yMin = y elif direction == "D": yMin = y elif direction == "DL": xMax = x yMin = y elif direction == "L": xMax = x elif direction == "UL": yMax = y xMax = x if "U" in direction or "D" in direction: y = (yMax - yMin) / 2 + yMin if "L" in direction or "R" in direction: x = (xMax - xMin) / 2 + xMin return [x, y, packBounds(xMin, xMax, yMin, yMax)] # width: width of the building. # height: height of the building. width, height = [int(i) for i in raw_input().split()] N = int(raw_input()) # maximum number of turns before game over. x, y = [int(i) for i in raw_input().split()] xMin = 0 yMin = 0 xMax = width yMax = height bounds = packBounds(xMin, xMax, yMin, yMax) # Game loop while True: # The direction of the bombs from batman's current location (U, UR, R, DR, D, DL, L or UL) direction = raw_input() x, y, bounds = nextMove(width, height, x, y, direction, bounds) print str(x) + " " + str(y)
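# --- Editor's worked example (illustrative; values assume Python 2 integer division) ---
# Starting from a 10 x 10 building with the device at (5, 5) and fresh bounds
# [[0, 10], [0, 10]], a reading of "UR" (heat source up and to the right) narrows the box
# to xMin = 5 and yMax = 5, then jumps to the midpoint of each remaining interval:
#
#   nextMove(10, 10, 5, 5, "UR", packBounds(0, 10, 0, 10))
#   -> x = (10 - 5) / 2 + 5 = 7,  y = (5 - 0) / 2 + 0 = 2,  bounds = [[5, 10], [0, 5]]
#
# Each turn therefore halves the remaining search area along the indicated axes, i.e. the
# usual binary-search strategy for this kind of "hotter/colder" detector puzzle.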
mit
-7,396,484,664,308,771,000
22.637931
91
0.624818
false
esnet/graphite-web
webapp/graphite/util.py
1
6838
"""Copyright 2008 Orbitz WorldWide Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.""" import imp import os import socket import time import sys from os.path import splitext, basename, relpath from shutil import move from tempfile import mkstemp try: import cPickle as pickle USING_CPICKLE = True except: import pickle USING_CPICKLE = False try: from cStringIO import StringIO except ImportError: from StringIO import StringIO from django.conf import settings from django.contrib.auth.models import User from graphite.account.models import Profile from graphite.logger import log # There are a couple different json modules floating around out there with # different APIs. Hide the ugliness here. try: import json except ImportError: import simplejson as json if hasattr(json, 'read') and not hasattr(json, 'loads'): json.loads = json.read json.dumps = json.write json.load = lambda file: json.read( file.read() ) json.dump = lambda obj, file: file.write( json.write(obj) ) def getProfile(request, allowDefault=True): if request.user.is_authenticated(): return Profile.objects.get_or_create(user=request.user)[0] elif allowDefault: return default_profile() def getProfileByUsername(username): try: return Profile.objects.get(user__username=username) except Profile.DoesNotExist: return None def is_local_interface(host): if ':' in host: host = host.split(':',1)[0] try: sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.connect( (host, 4242) ) local_ip = sock.getsockname()[0] sock.close() except: log.exception("Failed to open socket with %s" % host) raise if local_ip == host: return True return False def is_pattern(s): return '*' in s or '?' in s or '[' in s or '{' in s def is_escaped_pattern(s): for symbol in '*?[{': i = s.find(symbol) if i > 0: if s[i-1] == '\\': return True return False def find_escaped_pattern_fields(pattern_string): pattern_parts = pattern_string.split('.') for index,part in enumerate(pattern_parts): if is_escaped_pattern(part): yield index def default_profile(): # '!' is an unusable password. Since the default user never authenticates # this avoids creating a default (expensive!) password hash at every # default_profile() call. 
user, created = User.objects.get_or_create( username='default', defaults={'email': '[email protected]', 'password': '!'}) if created: log.info("Default user didn't exist, created it") profile, created = Profile.objects.get_or_create(user=user) if created: log.info("Default profile didn't exist, created it") return profile def load_module(module_path, member=None): module_name = splitext(basename(module_path))[0] module_file = open(module_path, 'U') description = ('.py', 'U', imp.PY_SOURCE) module = imp.load_module(module_name, module_file, module_path, description) if member: return getattr(module, member) else: return module def timestamp(datetime): "Convert a datetime object into epoch time" return time.mktime( datetime.timetuple() ) # This whole song & dance is due to pickle being insecure # The SafeUnpickler classes were largely derived from # http://nadiana.com/python-pickle-insecure # This code also lives in carbon.util if USING_CPICKLE: class SafeUnpickler(object): PICKLE_SAFE = { 'copy_reg': set(['_reconstructor']), '__builtin__': set(['object', 'list']), 'collections': set(['deque']), 'graphite.render.datalib': set(['TimeSeries']), 'graphite.intervals': set(['Interval', 'IntervalSet']), } @classmethod def find_class(cls, module, name): if not module in cls.PICKLE_SAFE: raise pickle.UnpicklingError('Attempting to unpickle unsafe module %s' % module) __import__(module) mod = sys.modules[module] if not name in cls.PICKLE_SAFE[module]: raise pickle.UnpicklingError('Attempting to unpickle unsafe class %s' % name) return getattr(mod, name) @classmethod def loads(cls, pickle_string): pickle_obj = pickle.Unpickler(StringIO(pickle_string)) pickle_obj.find_global = cls.find_class return pickle_obj.load() else: class SafeUnpickler(pickle.Unpickler): PICKLE_SAFE = { 'copy_reg': set(['_reconstructor']), '__builtin__': set(['object', 'list']), 'collections': set(['deque']), 'graphite.render.datalib': set(['TimeSeries']), 'graphite.intervals': set(['Interval', 'IntervalSet']), } def find_class(self, module, name): if not module in self.PICKLE_SAFE: raise pickle.UnpicklingError('Attempting to unpickle unsafe module %s' % module) __import__(module) mod = sys.modules[module] if not name in self.PICKLE_SAFE[module]: raise pickle.UnpicklingError('Attempting to unpickle unsafe class %s' % name) return getattr(mod, name) @classmethod def loads(cls, pickle_string): return cls(StringIO(pickle_string)).load() unpickle = SafeUnpickler def write_index(whisper_dir=None, ceres_dir=None, index=None): if not whisper_dir: whisper_dir = settings.WHISPER_DIR if not ceres_dir: ceres_dir = settings.CERES_DIR if not index: index = settings.INDEX_FILE try: fd, tmp = mkstemp() try: tmp_index = os.fdopen(fd, 'wt') build_index(whisper_dir, ".wsp", tmp_index) build_index(ceres_dir, ".ceres-node", tmp_index) finally: tmp_index.close() move(tmp, index) finally: try: os.unlink(tmp) except: pass return None def build_index(base_path, extension, fd): t = time.time() total_entries = 0 contents = os.walk(base_path, followlinks=True) extension_len = len(extension) for (dirpath, dirnames, filenames) in contents: path = relpath(dirpath, base_path).replace('/', '.') for metric in filenames: if metric.endswith(extension): metric = metric[:-extension_len] else: continue line = "{0}.{1}\n".format(path, metric) total_entries += 1 fd.write(line) fd.flush() log.info("[IndexSearcher] index rebuild of \"%s\" took %.6f seconds (%d entries)" % (base_path, time.time() - t, total_entries)) return None
apache-2.0
8,822,200,391,569,881,000
28.474138
130
0.674612
false
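The SafeUnpickler classes in the record above exist because a plain pickle.loads will import and instantiate any global named in the stream; the defence is a whitelist consulted from find_class. The snippet below is a minimal, standalone sketch of that pattern, not graphite's actual class, and the ALLOWED table is a made-up whitelist for the example.

import collections
import io
import pickle

ALLOWED = {"collections": {"deque"}}  # hypothetical whitelist for this sketch

class RestrictedUnpickler(pickle.Unpickler):
    """Resolve only globals that appear in the ALLOWED table."""

    def find_class(self, module, name):
        if name in ALLOWED.get(module, ()):
            return super().find_class(module, name)
        raise pickle.UnpicklingError("forbidden global %s.%s" % (module, name))

def safe_loads(data):
    return RestrictedUnpickler(io.BytesIO(data)).load()

print(safe_loads(pickle.dumps(collections.deque([1, 2]))))  # allowed by the whitelist
try:
    safe_loads(pickle.dumps(collections.OrderedDict(a=1)))  # OrderedDict is not listed
except pickle.UnpicklingError as exc:
    print(exc)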
unbit/django-uwsgi
django_uwsgi/stats.py
1
1477
import os import time from datetime import datetime from django.utils.translation import ugettext_lazy as _ from . import uwsgi def get_uwsgi_stats(): uwsgi_stats = {} workers = uwsgi.workers() total_load = time.time() - uwsgi.started_on for w in workers: w['running_time'] = w['running_time'] / 1000 w['avg_rt'] = w['avg_rt'] / 1000 w['load'] = w['running_time'] / total_load / 10 / len(workers) w['last_spawn'] = datetime.fromtimestamp(w['last_spawn']) jobs = [] if uwsgi.opt.get('spooler'): spooler_jobs = uwsgi.spooler_jobs() for j in spooler_jobs: jobs.append({'file': j, 'env': uwsgi.parsefile(str(j))}) uwsgi_stats.update({ 'uwsgi': uwsgi, 'stats': [ ('loop', uwsgi.loop), ('masterpid', str(uwsgi.masterpid())), ('started_on', datetime.fromtimestamp(uwsgi.started_on)), ('now', datetime.now()), ('buffer_size', uwsgi.buffer_size), ('total_requests', uwsgi.total_requests()), ('numproc', uwsgi.numproc), ('cores', uwsgi.cores), ('cwd', os.getcwd()), ('logsize', uwsgi.logsize()), ('spooler_pid', uwsgi.spooler_pid() if uwsgi.opt.get('spooler') else _('disabled')), ('threads', _('enabled') if uwsgi.has_threads else _('disabled')) ], 'workers': workers, 'jobs': jobs }) return uwsgi_stats
mit
-3,016,437,870,239,516,000
35.02439
96
0.546378
false
inveniosoftware/invenio-oauth2server
tests/test_settings.py
1
8413
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2015-2018 CERN. # # Invenio is free software; you can redistribute it and/or modify it # under the terms of the MIT License; see LICENSE file for more details. """Test settings views.""" from __future__ import absolute_import, print_function from flask import url_for from flask_babelex import gettext as _ from helpers import login from invenio_oauth2server.models import Client, Token def test_personal_token_management(settings_fixture): """Test managing personal tokens through the views.""" app = settings_fixture with app.test_request_context(): with app.test_client() as client: login(client) # Non-existing token should return 404 resp = client.get( url_for('invenio_oauth2server_settings.token_view', token_id=1000) ) resp.status_code == 404 # Get the new token form resp = client.get( url_for('invenio_oauth2server_settings.token_new') ) resp.status_code == 200 assert _('New personal access token') in str(resp.get_data()) assert '<label for="scopes-test:scope"' in str(resp.get_data()) assert '<label for="scopes-test:scope2"' in str(resp.get_data()) # Create a new token with invalid form data resp = client.post( url_for('invenio_oauth2server_settings.token_new'), data={ 'name': 'x' * (40 + 1), # max length is 40 }, follow_redirects=True ) assert resp.status_code == 200 assert 'name must be less than 40 char' in str(resp.get_data()) # Create a new token resp = client.post( url_for('invenio_oauth2server_settings.token_new'), data={ 'name': 'Test_Token', }, follow_redirects=True ) assert resp.status_code == 200 assert 'Personal access token / Test_Token' in str(resp.get_data()) assert 'test:scope' in str(resp.get_data()) assert 'test:scope2' in str(resp.get_data()) token = Token.query.first() # Rename the token resp = client.post( url_for('invenio_oauth2server_settings.token_view', token_id=token.id), data=dict(name='Test_Token_Renamed') ) assert resp.status_code == 200 assert 'Test_Token_Renamed' in str(resp.get_data()) # Token should be visible on index resp = client.get(url_for('invenio_oauth2server_settings.index')) assert resp.status_code == 200 assert 'Test_Token_Renamed' in str(resp.get_data()) # Delete the token resp = client.post( url_for('invenio_oauth2server_settings.token_view', token_id=1), data=dict(delete=True), follow_redirects=True) assert resp.status_code == 200 # Token should no longer exist on index assert 'Test_Token_Renamed' not in str(resp.get_data()) def test_authorized_app_revocation(developer_app_fixture): """Test managing authorized application tokens through the views.""" app = developer_app_fixture with app.test_request_context(): with app.test_client() as client: login(client) # Check that there is a single token for the authorized application assert Token.query.count() == 1 # Check that the authorized application is visible on index view resp = client.get(url_for('invenio_oauth2server_settings.index')) assert resp.status_code == 200 assert 'Test description' in str(resp.get_data()) assert 'Test name' in str(resp.get_data()) # Revoke the authorized application token resp = client.get( url_for('invenio_oauth2server_settings.token_revoke', token_id=1), follow_redirects=True) assert resp.status_code == 200 # Authorized application should no longer exist on index assert 'Test description' not in str(resp.get_data()) assert 'Test name' not in str(resp.get_data()) # Check that the authorized application token was actually deleted assert Token.query.count() == 0 def 
test_client_management(settings_fixture): """Test managing clients through the views.""" app = settings_fixture with app.test_request_context(): with app.test_client() as client: login(client) # Non-existing client should return 404 resp = client.get( url_for('invenio_oauth2server_settings.client_view', client_id=1000) ) assert resp.status_code == 404 # Create a new client resp = client.post( url_for('invenio_oauth2server_settings.client_new'), data=dict( name='Test_Client', description='Test description for Test_Client.', website='http://inveniosoftware.org/', redirect_uris=url_for( 'invenio_oauth2server_settings.index', _external=True), is_confditential=1 ), follow_redirects=True) assert resp.status_code == 200 assert 'Application / Test_Client' in str(resp.get_data()) test_client = Client.query.first() assert test_client.client_id in str(resp.get_data()) # Client should be visible on index resp = client.get(url_for('invenio_oauth2server_settings.index')) assert resp.status_code == 200 assert 'Test_Client' in str(resp.get_data()) # Reset client secret original_client_secret = test_client.client_secret resp = client.post( url_for('invenio_oauth2server_settings.client_reset', client_id=test_client.client_id), data=dict(reset='yes'), follow_redirects=True ) assert resp.status_code == 200 assert test_client.client_secret in str(resp.get_data()) assert original_client_secret not in str(resp.get_data()) # Invalid redirect uri should error original_redirect_uris = test_client.redirect_uris resp = client.post( url_for('invenio_oauth2server_settings.client_view', client_id=test_client.client_id), data=dict( name='Test_Client', description='Test description for Test_Client', website='http://inveniosoftware.org/', redirect_uris='https:/invalid', ) ) assert resp.status_code == 200 assert test_client.redirect_uris == original_redirect_uris # Modify the client resp = client.post( url_for('invenio_oauth2server_settings.client_view', client_id=test_client.client_id), data=dict( name='Modified_Name', description='Modified Description', website='http://modified-url.org', redirect_uris='https://example.org', ) ) assert resp.status_code == 200 assert 'Modified_Name' in str(resp.get_data()) assert 'Modified Description' in str(resp.get_data()) assert 'http://modified-url.org' in str(resp.get_data()) # Delete the client resp = client.post( url_for('invenio_oauth2server_settings.client_view', client_id=test_client.client_id), follow_redirects=True, data=dict(delete=True) ) assert resp.status_code == 200 assert test_client.name not in str(resp.get_data())
mit
-6,553,497,254,362,042,000
39.061905
79
0.546535
false
LouisTrezzini/projet-mairie
api/franceocr/ocr.py
1
3348
""" BSD 3-Clause License Copyright (c) 2017, Mairie de Paris All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import pytesseract import re from franceocr.config import BASEDIR from PIL import Image def ocr(image, lang="fra", config=None): image = Image.fromarray(image) return pytesseract.image_to_string(image, lang=lang, config=config) def ocr_read_number(text): text = text \ .replace('O', '0') \ .replace('I', '1') \ .replace('S', '5') \ .replace('B', '8') return text def ocr_read_text(text): text = text \ .replace('0', 'O') \ .replace('1', 'I') \ .replace('5', 'S') \ .replace('8', 'B') return text def ocr_cni(image): ocr_result = ocr( image, "franceocr", "--oem 1 --psm 7 " + BASEDIR + "/tessconfig/cni" ) ocr_result = ocr_result \ .lstrip(":") \ .replace(",", "") \ .replace(".", "") \ .strip() return re.sub(r" +", " ", ocr_result) def ocr_cni_birth_date(image): ocr_result = ocr( image, "franceocr", "--oem 1 --psm 7 " + BASEDIR + "/tessconfig/cni-birth_date" ) ocr_result = ocr_read_number(ocr_result) return ocr_result \ .replace(' ', '') \ .replace(',', '') \ .replace('.', '') def ocr_cni_birth_place(image): ocr_result = ocr( image, "franceocr", "--oem 1 --psm 7 " + BASEDIR + "/tessconfig/cni-birth_place" ) no_brackets = re.sub( r"\(.*\)", "", ocr_result ) only_alphanum = re.sub( r"[^a-zA-Z0-9' \-]", "", no_brackets ) return only_alphanum \ .lstrip(":") \ .lstrip("'") \ .strip() def ocr_cni_mrz(image): ocr_result = ocr( image, "OCRB", "--oem 0 " + BASEDIR + "/tessconfig/cni-mrz" ) return ocr_result
bsd-3-clause
-2,593,763,848,263,666,700
24.753846
78
0.628136
false
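The ocr_read_number and ocr_read_text helpers in the record above undo the classic OCR confusions (O/0, I/1, S/5, B/8) depending on whether a field is expected to be numeric or alphabetic. The standalone sketch below expresses the same heuristic with translation tables; the function names are illustrative and it is not the franceocr implementation.

TO_DIGITS = str.maketrans("OISB", "0158")
TO_LETTERS = str.maketrans("0158", "OISB")

def as_number_field(text):
    """Push letter look-alikes towards digits, for fields known to be numeric."""
    return text.translate(TO_DIGITS)

def as_text_field(text):
    """Push digit look-alikes towards letters, for fields known to be alphabetic."""
    return text.translate(TO_LETTERS)

print(as_number_field("1O 05 199S"))  # -> 10 05 1995
print(as_text_field("DUP0NT"))        # -> DUPONT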
SecondLiners/GO2
band.py
1
8429
# # band class for Gig-o-Matic 2 # # Aaron Oppenheimer # 24 August 2013 # from google.appengine.ext import ndb import debug import assoc import gig import plan import stats def band_key(band_name='band_key'): """Constructs a Datastore key for a Guestbook entity with guestbook_name.""" return ndb.Key('Band', band_name) # # class for band # class Band(ndb.Model): """ Models a gig-o-matic band """ name = ndb.StringProperty() lower_name = ndb.ComputedProperty(lambda self: self.name.lower()) shortname = ndb.StringProperty() website = ndb.TextProperty() description = ndb.TextProperty() hometown = ndb.TextProperty() sections = ndb.KeyProperty(repeated=True) # instrumental sections created = ndb.DateTimeProperty(auto_now_add=True) timezone = ndb.StringProperty(default='UTC') thumbnail_img = ndb.TextProperty(default=None) images = ndb.TextProperty(repeated=True) member_links = ndb.TextProperty(default=None) share_gigs = ndb.BooleanProperty(default=True) anyone_can_manage_gigs = ndb.BooleanProperty(default=True) anyone_can_create_gigs = ndb.BooleanProperty(default=True) condensed_name = ndb.ComputedProperty(lambda self: ''.join(ch for ch in self.name if ch.isalnum()).lower()) simple_planning = ndb.BooleanProperty(default=False) plan_feedback = ndb.TextProperty() show_in_nav = ndb.BooleanProperty(default=True) send_updates_by_default = ndb.BooleanProperty(default=True) rss_feed = ndb.BooleanProperty(default=False) band_cal_feed_dirty = ndb.BooleanProperty(default=True) pub_cal_feed_dirty = ndb.BooleanProperty(default=True) new_member_message = ndb.TextProperty(default=None) @classmethod def lquery(cls, *args, **kwargs): if debug.DEBUG: print('{0} query'.format(cls.__name__)) return cls.query(*args, **kwargs) def get_band(the_band_key): """ takes a single band key or a list """ if isinstance(the_band_key, list): return ndb.get_multi(the_band_key) else: if not isinstance(the_band_key, ndb.Key): raise TypeError("get_band expects a band key") return the_band_key.get() def put_band(the_band): """ takes a single band object or a list """ if isinstance(the_band, list): return ndb.put_multi(the_band) else: if not isinstance(the_band, Band): raise TypeError("put_band expects a band") return the_band.put() def new_band(name): """ Make and return a new band """ the_band = Band(parent=band_key(), name=name) put_band(the_band) return the_band def band_key_from_urlsafe(the_band_keyurl): return ndb.Key(urlsafe=the_band_keyurl) def forget_band_from_key(the_band_key): # delete all assocs the_assoc_keys = assoc.get_assocs_of_band_key(the_band_key, confirmed_only=False, keys_only=True) ndb.delete_multi(the_assoc_keys) # delete the sections the_section_keys = get_section_keys_of_band_key(the_band_key) ndb.delete_multi(the_section_keys) # delete the gigs the_gigs = gig.get_gigs_for_band_keys(the_band_key, num=None, start_date=None) the_gig_keys = [a_gig.key for a_gig in the_gigs] # delete the plans for a_gig_key in the_gig_keys: plan_keys = plan.get_plans_for_gig_key(a_gig_key, keys_only = True) ndb.delete_multi(plan_keys) ndb.delete_multi(the_gig_keys) stats.delete_band_stats(the_band_key) # delete the band the_band_key.delete() def get_band_from_condensed_name(band_name): """ Return a Band object by name""" bands_query = Band.lquery(Band.condensed_name==band_name.lower(), ancestor=band_key()) band = bands_query.fetch(1) if len(band)==1: return band[0] else: return None def get_all_bands(keys_only=False): """ Return all objects""" bands_query = Band.lquery(ancestor=band_key()).order(Band.lower_name) all_bands = 
bands_query.fetch(keys_only=keys_only) return all_bands def get_assocs_of_band_key_by_section_key(the_band_key, include_occasional=True): the_band = get_band(the_band_key) the_info=[] the_map={} count=0 for s in the_band.sections: the_info.append([s,[]]) the_map[s]=count count=count+1 the_info.append([None,[]]) # for 'None' the_map[None]=count the_assocs = assoc.get_confirmed_assocs_of_band_key(the_band_key, include_occasional=include_occasional) for an_assoc in the_assocs: the_info[the_map[an_assoc.default_section]][1].append(an_assoc) if the_info[the_map[None]][1] == []: the_info.pop(the_map[None]) return the_info def get_feedback_strings(the_band): return the_band.plan_feedback.split('\n') def make_band_cal_dirty(the_band): the_band.band_cal_feed_dirty = True the_band.pub_cal_feed_dirty = True the_assocs = assoc.get_confirmed_assocs_of_band_key(the_band.key, include_occasional=True) the_member_keys = [a.member for a in the_assocs] the_members = ndb.get_multi(the_member_keys) for m in the_members: m.cal_feed_dirty = True ndb.put_multi(the_members+[the_band]) # # class for section # class Section(ndb.Model): """ Models an instrument section in a band """ name = ndb.StringProperty() def new_section(parent, name): return Section(parent=parent, name=name) def section_key_from_urlsafe(the_section_keyurl): return ndb.Key(urlsafe=the_section_keyurl) def set_section_indices(the_band): """ for every assoc in the band, set the default_section_index according to the section list in the band """ map = {} for i,s in enumerate(the_band.sections): map[s] = i map[None] = None the_assocs = assoc.get_confirmed_assocs_of_band_key(the_band.key, include_occasional=True) for a in the_assocs: a.default_section_index = map[a.default_section] ndb.put_multi(the_assocs) def new_section_for_band(the_band, the_section_name): the_section = Section(parent=the_band.key, name=the_section_name) the_section.put() if the_band.sections: if the_section not in the_band.sections: the_band.sections.append(the_section.key) else: the_band.sections=[the_section.key] the_band.put() return the_section def delete_section_key(the_section_key): # todo make sure the section is empty before deleting it # get the parent band's list of sections and delete ourselves the_band = get_band(the_section_key.parent()) if the_section_key in the_band.sections: i = the_band.sections.index(the_section_key) the_band.sections.pop(i) the_band.put() # if any member is using this section, reset them to no section assoc_keys = assoc.get_assocs_for_section_key(the_section_key, keys_only=True) if assoc_keys: assocs = assoc.get_assoc(assoc_keys) for a in assocs: a.default_section = None assoc.save_assocs(assocs) # For any gig, it's possible that the user has previously specified that he wants to play in the # section to be deleted. So, find plans with the section set, and reset the section # for that plan back to None to use the default. 
plan.remove_section_from_plans(the_section_key) the_section_key.delete() def get_section_keys_of_band_key(the_band_key): the_band = get_band(the_band_key) if the_band: return the_band.sections else: return [] def get_sections_from_keys(the_section_keys): return ndb.get_multi(the_section_keys) def rest_band_info(the_band, the_assoc=None, include_id=True, name_only=False): obj = { k:getattr(the_band,k) for k in ('name','shortname') } if name_only==False: for k in ('description','simple_planning'): obj[k] = getattr(the_band,k) # obj = { k:getattr(the_band,k) for k in ('name','shortname','description','simple_planning') } obj['plan_feedback'] = map(str.strip,str(the_band.plan_feedback).split("\n")) if the_band.plan_feedback else "" the_sections = get_sections_from_keys(the_band.sections) obj['sections'] = [{'name':s.name, 'id':s.key.urlsafe()} for s in the_sections] if include_id: obj['id'] = the_band.key.urlsafe() if the_assoc: obj.update( assoc.rest_assoc_info(the_assoc) ) return obj
gpl-3.0
6,071,294,075,494,389,000
29.650909
119
0.660577
false
gdsfactory/gdsfactory
pp/components/cdc.py
1
3052
from typing import Optional, Tuple import numpy as np import picwriter.components as pc import pp from pp.cell import cell from pp.component import Component from pp.components.waveguide_template import strip from pp.picwriter_to_component import picwriter_to_component from pp.types import ComponentFactory @cell def cdc( length: float = 30.0, gap: float = 0.5, period: float = 0.220, dc: float = 0.5, angle: float = np.pi / 6.0, width_top: float = 2.0, width_bot: float = 0.75, input_bot: bool = False, dw_top: Optional[float] = None, dw_bot: Optional[float] = None, fins: bool = False, fin_size: Tuple[float, float] = (0.2, 0.05), contradc_wgt: None = None, port_midpoint: Tuple[int, int] = (0, 0), direction: str = "EAST", waveguide_template: ComponentFactory = strip, **kwargs ) -> Component: """Grating-Assisted Contra-Directional Coupler Args: length : Length of the coupling region. gap: Distance between the two straights. period: Period of the grating. dc: Duty cycle of the grating. Must be between 0 and 1. angle: in radians at which the straight bends towards the coupling region. width_top: Width of the top straight in the coupling region. width_bot: Width of the bottom straight in the coupling region. dw_top: Amplitude of the width variation on the top. Default=gap/2.0. dw_bot: Amplitude of the width variation on the bottom. Default=gap/2.0. input_bot: True makes the default input the bottom straight (rather than top) fins: If `True`, adds fins to the input/output straights. In this case a different template for the component must be specified. This feature is useful when performing electron-beam lithography and using different beam currents for fine features (helps to reduce stitching errors). fin_size: Specifies the x- and y-size of the `fins`. Defaults to 200 nm x 50 nm contradc_wgt: port_midpoint: Cartesian coordinate of the input port (AT TOP if input_bot=False, AT BOTTOM if input_bot=True). direction: Direction that the component will point *towards*, can be of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, OR an angle (float, in radians). waveguide_template: Picwriter WaveguideTemplate function """ c = pc.ContraDirectionalCoupler( pp.call_if_func(strip, **kwargs), length=length, gap=gap, period=period, dc=dc, angle=angle, width_top=width_top, width_bot=width_bot, dw_top=dw_top, dw_bot=dw_bot, input_bot=input_bot, fins=fins, fin_size=fin_size, contradc_wgt=contradc_wgt, port=port_midpoint, direction=direction, ) component = picwriter_to_component(c) pp.port.rename_ports_by_orientation(component) return component if __name__ == "__main__": c = cdc() print(c.ports.keys()) c.show()
mit
-7,344,208,948,851,704,000
32.173913
86
0.647772
false
phil-mcdowall/weighted_dicts
tests/test_weighteddict.py
1
2375
import weighted_dict as weighteddict import pytest class TestWeightedDict(object): def test_keyerror(self): """raise key error if key not in dict""" x = weighteddict.WeightedDict() with pytest.raises(KeyError): x[1] def test_set(self): """check total of weights after addition""" x = weighteddict.WeightedDict() x[1] = "value 1",1 x[2] = "value 2",2 assert x._total == 3 assert len(x._data) == len(x._weights) def test_get(self): x = weighteddict.WeightedDict() x[1] = "value 1", 1 assert x.get(1) == "value 1" assert x.get(0,default="default_val") == "default_val" def test_set_no_weight(self): """raise TypeError if no weight specified""" x = weighteddict.WeightedDict() with pytest.raises(TypeError): x[1] = "value 1" def test_del(self): """check total of weights after deletion""" x = weighteddict.WeightedDict() x[1] = "value 1", 1 x[2] = "value 2", 2 del x[2] assert x._total == 1 assert len(x._data) == len(x._weights) def test_modify(self): """check total after changing key/value/weight""" x = weighteddict.WeightedDict() x[1] = "value 1", 1 x[2] = "value 2", 2 x[2] = "value 2", 1 assert x._total == 2 assert len(x._data) == len(x._weights) def test_pop(self): """check total""" x = weighteddict.WeightedDict() x[1] = "value 1", 1 x[2] = "value 2", 2 y = x.pop(1) assert y == "value 1" assert len(x._data) == len(x._weights) assert 1 not in x assert x._total == 2 def test_random(self): x = weighteddict.WeightedDict() x[1] = "value 1", 1 x[2] = "value 2", 0 y = x.random(1) assert y == ["value 1"] def test_weight_value(self): x = weighteddict.WeightedDict() with pytest.raises(ValueError): x[1] = "value 1", -1 with pytest.raises(TypeError): x[1] = "value 1", 'f' def test_copy(self): x = weighteddict.WeightedDict() x[1] = "value 1", 1 x[2] = "value 2", 2 y = x.copy() print(y) assert x[1] == y[1] and x[2] == y[2] and x._total == y._total
mit
-2,969,719,437,362,804,000
27.614458
69
0.515368
false
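The tests in the record above imply the WeightedDict contract: assignment takes a (value, weight) pair, weights must be non-negative numbers, a running total is kept, and random draws are proportional to weight. The toy class below is only a sketch written against that reading of the tests, not the weighted_dict package itself.

import random

class ToyWeightedDict(dict):
    """Minimal sketch: values stored alongside weights, sampled proportionally."""

    def __init__(self):
        super().__init__()
        self._weights = {}

    def __setitem__(self, key, value_weight):
        value, weight = value_weight  # assignment takes a (value, weight) pair
        if isinstance(weight, bool) or not isinstance(weight, (int, float)):
            raise TypeError("weight must be a number")
        if weight < 0:
            raise ValueError("weight must be non-negative")
        super().__setitem__(key, value)
        self._weights[key] = weight

    def __delitem__(self, key):
        super().__delitem__(key)
        del self._weights[key]

    @property
    def total(self):
        return sum(self._weights.values())

    def sample(self, k):
        keys = list(self)
        weights = [self._weights[key] for key in keys]
        return [self[key] for key in random.choices(keys, weights=weights, k=k)]

d = ToyWeightedDict()
d[1] = ("value 1", 1)
d[2] = ("value 2", 0)
print(d.total, d.sample(1))  # total is 1; only "value 1" can ever be drawn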
neuroo/equip
tests/test_control_flow.py
1
7408
import pytest from itertools import tee, izip from testutils import get_co, get_bytecode from equip import BytecodeObject from equip.bytecode.utils import show_bytecode import equip.utils.log as logutils from equip.utils.log import logger logutils.enableLogger(to_file='./equip.log') from equip.analysis import ControlFlow, BasicBlock SIMPLE_PROGRAM = """ import random import sys a = lambda x, y: x + (y if foo == 'bar' else x) def some_value(i): if (i % 2) == 0: print "even", elif foobar: print "whatever" else: print "odd", for n in range(2, 10): for x in range(2, n): if n % x == 0: print n, 'equals', x, '*', n/x break elif n == 2 or n in (1,2) or n / 3 == 1: continue print "foobar" else: # loop fell through without finding a factor print n, 'is a prime number' print "number: %d" % i return i - 1 def ask_ok(prompt, retries=4, complaint='Yes or no, please!'): while True: ok = raw_input(prompt) if ok in ('y', 'ye', 'yes'): return True if ok in ('n', 'no', 'nop', 'nope'): return False print False retries = retries - 1 if retries < 0: raise IOError('refusenik user') print "Never reached" print complaint if isinstance(b, string): print 'b is a string' if foobar: print "whatever" def with_stmt(something): with open('output.txt', 'w') as f: f.write('Hi there!') def exception_tests(): try: fd = open('something') except SomeException, ex: print "SomeException" except Exception, ex: print "Last Exception" finally: print "Finally" def while_loop(data, start): while start < len(data): print start start += 1 if 0 > start > 10: return -1 def test_conditions(): global FOOBAR if (a + b + something(FOOBAR)) == 0: print foo def main(): for i in range(1, random.randint()): print some_value(i) print "Call stats:" items = sys.callstats().items() items = [(value, key) for key, value in items] items.sort() items.reverse() for value,key in items: print "%30s: %30s"%(key, value) def return_Stmts(i): if i == 1: return 1 elif i == 2: return 2 print "This is something else" def jump_no_absolute(foo): if foo.bar == 1: if foo.baz == 2: if foo.buzz == 3: return some_value(foo) else: return other_value(foo) if __name__ == '__main__': main() """ def test_cflow1(): co_simple = get_co(SIMPLE_PROGRAM) assert co_simple is not None bytecode_object = BytecodeObject('<string>') bytecode_object.parse_code(co_simple) assert len(bytecode_object.declarations) == 11 for decl in bytecode_object.declarations: cflow = ControlFlow(decl) assert cflow.blocks is not None assert len(cflow.graph.roots()) == 1 assert len(cflow.dominators.dom) > 0 cflow.block_constraints is not None for block in cflow.block_constraints: cstr = cflow.block_constraints[block] assert cstr.tree is not None logger.debug("Constraint: %s", cstr) cdg = cflow.control_dependence WHILE_CASE = """ while i < length: i += 1 print i """ def test_while_loop(): co_simple = get_co(WHILE_CASE) assert co_simple is not None bytecode_object = BytecodeObject('<string>') bytecode_object.parse_code(co_simple) assert len(bytecode_object.declarations) == 1 for decl in bytecode_object.declarations: cflow = ControlFlow(decl) assert cflow.blocks is not None assert len(cflow.dominators.dom) > 0 IF_STMTS_CASE = """ if i == 1: print 1 elif i == 2: print 2 elif i % 0 == 1: print 'elif' else: print 'final-case' """ def test_if_statements(): co_simple = get_co(IF_STMTS_CASE) assert co_simple is not None bytecode_object = BytecodeObject('<string>') bytecode_object.parse_code(co_simple) assert len(bytecode_object.declarations) == 1 for decl in bytecode_object.declarations: cflow = ControlFlow(decl) 
assert cflow.blocks is not None assert len(cflow.dominators.dom) > 0 LOOP_BREAK_CASE = """ def func(): while i < length: if i % 2 == 0: break for j in range(0, 10): k = 0 for k in range(0, 10): l = 0 for l in range(0, 10): print j, k, l if l == 2: break elif l == 3: return print "end-l-loop" if k == 2: break print "end-k-loop" print "end-j-loop" print "Final" """ def test_loop_breaks(): logger.debug("test_loop_breaks") co_simple = get_co(LOOP_BREAK_CASE) assert co_simple is not None bytecode_object = BytecodeObject('<string>') bytecode_object.parse_code(co_simple) assert len(bytecode_object.declarations) == 2 for decl in bytecode_object.declarations: cflow = ControlFlow(decl) assert cflow.blocks is not None assert len(cflow.dominators.dom) > 0 CONDITION_CASE = """ if a + b + c < 2: print 'foo' elif ((a & 0xff != 0) and 2 + something(foobar ** 2) + 1 != 0) or 1 == 2: print 'bar' """ def test_conditions(): logger.debug("test_conditions") co_simple = get_co(CONDITION_CASE) assert co_simple is not None bytecode_object = BytecodeObject('<string>') bytecode_object.parse_code(co_simple) for decl in bytecode_object.declarations: cflow = ControlFlow(decl) cflow.block_constraints CONSTRAINT_EQ_CASE = """ def f1(): if a > 0 and a > 0: print 'dood' def f2(): if a + b > 0 and b + a > 0: print 'dood' def f3(): # Note that this fails if we remove the parens around b/2 since # the comparison operator doesn't get the distributivity (only the # commutativity of operators) if a * (b * (1/2)) > 0 and a * ((1/2) * b) > 0: print 'dood' """ def get_pairs(iterable): a, b = tee(iterable) next(b, None) return izip(a, b) def test_constraint_equivalence(): logger.debug("test_conditions") co_simple = get_co(CONSTRAINT_EQ_CASE) assert co_simple is not None bytecode_object = BytecodeObject('<string>') bytecode_object.parse_code(co_simple) for decl in bytecode_object.declarations: cflow = ControlFlow(decl) all_constraints = list() for block in cflow.block_constraints: logger.debug("Cstr: %s", cflow.block_constraints[block].tree) all_constraints.append(cflow.block_constraints[block].tree) for cstr1, cstr2 in get_pairs(all_constraints): assert cstr1 == cstr2 LIST_COMP_CASE = """ def f1(a): if a == 1: lst = [d for d in a] return lst else: lst1 = [(foo, bar) for foo, bar in a.getValues() if foo != 'some' if bar != 'ddd' if bar != 'ddd' if bar != 'ddd'] # list comp lst2 = ((foo, bar) for foo, bar in a.getValues() if foo != 'some') # gen pyt = ((x, y, z) for z in integers() \ for y in xrange(1, z) \ for x in range(1, y) \ if x*x + y*y == z*z) print pyt return [] """ def test_list_comprehension(): logger.debug("test_list_comprehension") co_simple = get_co(LIST_COMP_CASE) assert co_simple is not None bytecode_object = BytecodeObject('<string>') bytecode_object.parse_code(co_simple) for decl in bytecode_object.declarations: cflow = ControlFlow(decl) for block in cflow.blocks: for stmt in block.statements: if stmt.native is not None: logger.debug("%s", stmt)
apache-2.0
2,534,338,296,392,331,300
21.313253
130
0.620545
false
windelbouwman/ppci-mirror
ppci/utils/bitfun.py
1
6113
""" Module full of bit manipulating helper classes. """ def rotate_right(v, n): """ bit-wise Rotate right n times """ mask = (2 ** n) - 1 mask_bits = v & mask return (v >> n) | (mask_bits << (32 - n)) def rotate_left(v, n): assert n >= 0 assert n < 32 return rotate_right(v, 32 - n) def rotl(v, count, bits): """ Rotate v left count bits """ mask = (1 << bits) - 1 count = count % bits return ((v << count) & mask) | (v >> (bits - count)) def reverse_bits(v, bits): """ Do bit reversal operation. Example input (8 bits case): 11100001 10000111 """ y = 0 pos = bits - 1 while pos > 0: y += (v & 1) << pos v >>= 1 pos -= 1 return y def rotr(v, count, bits): """ Rotate v right count bits """ mask = (1 << bits) - 1 count = count % bits return (v >> count) | ((v << (bits - count)) & mask) def to_signed(value, bits): return correct(value, bits, True) def to_unsigned(value, bits): return correct(value, bits, False) def correct(value, bits, signed): base = 1 << bits value %= base if signed and value.bit_length() == bits: return value - base else: return value def clz(v: int, bits: int) -> int: """ count leading zeroes """ mask = 1 << (bits - 1) count = 0 while (count < bits) and (v & mask) == 0: count += 1 v = v * 2 return count def ctz(v: int, bits: int) -> int: """ count trailing zeroes """ count = 0 while count < bits and (v % 2) == 0: count += 1 v //= 2 return count def popcnt(v: int, bits: int) -> int: """ count number of one bits """ count = 0 for i in range(bits): if v & (1 << i): count += 1 return count def sign_extend(value: int, bits: int) -> int: """ Perform sign extension operation. """ sign_bit = 1 << (bits - 1) mask = sign_bit - 1 return (value & mask) - (value & sign_bit) def value_to_bytes_big_endian(value: int, size: int): """ Pack integer value into bytes """ byte_numbers = reversed(range(size)) return bytes((value >> (x * 8)) & 0xFF for x in byte_numbers) def value_to_bits(v, bits): """ Convert a value to a list of booleans """ b = [] for i in range(bits): b.append(bool((1 << i) & v)) return b def bits_to_bytes(bits): """ Convert a sequence of booleans into bytes """ while len(bits) % 8 != 0: bits.append(False) m = bytearray() for i in range(0, len(bits), 8): v = 0 for j in range(8): if bits[i + j]: v = v | (1 << j) m.append(v) return bytes(m) def encode_imm32(v): """ Bundle 32 bit value into 4 bits rotation and 8 bits value """ for i in range(0, 16): v2 = rotate_left(v, i * 2) if (v2 & 0xFFFFFF00) == 0: rotation = i val = v2 & 0xFF x = (rotation << 8) | val return x raise ValueError("Invalid value {}".format(v)) def align(value, m): """ Increase value to a multiple of m """ while (value % m) != 0: value = value + 1 return value def wrap_negative(value, bits): """ Make a bitmask of a value, even if it is a negative value ! """ upper_limit = (1 << (bits)) - 1 lower_limit = -(1 << (bits - 1)) if value not in range(lower_limit, upper_limit + 1): raise ValueError( "Cannot encode {} in {} bits [{},{}]".format( value, bits, lower_limit, upper_limit ) ) mask = (1 << bits) - 1 bit_value = value & mask # Performing bitwise and makes it 2 complement. assert bit_value >= 0 return bit_value def inrange(value, bits): """ Test if a signed value can be fit into the given number of bits """ upper_limit = 1 << (bits - 1) lower_limit = -(1 << (bits - 1)) return value in range(lower_limit, upper_limit) class BitView: """ A convenience window on a set of bits, to fiddle them easily construct the bitview on a bytearray at a given start index and a given length. 
""" def __init__(self, data, begin, length): self.data = data self.begin = begin self.length = length # TODO: implement endianess! def __setitem__(self, key, value): if type(key) is slice: assert key.step is None bits = key.stop - key.start assert bits > 0 assert key.stop <= self.length * 8 limit = 1 << bits assert value < limit # We can begin, process per byte for j in range(self.length): bitpos1 = j * 8 bitpos2 = bitpos1 + 8 # If we start after the last bit of this byte, carry on if key.start >= bitpos2: continue # If we stop before the first byte, stop if key.stop <= bitpos1: break # We are ready to fiddle! if key.start > bitpos1: p1 = key.start else: p1 = bitpos1 if key.stop < bitpos2: p2 = key.stop else: p2 = bitpos2 # print('p1, p2=', p1, p2) bitsize = p2 - p1 bitmask = (1 << bitsize) - 1 # Determine the mask: mask = bitmask << (p1 - bitpos1) # Determine the new value of the bits: bits = (bitmask & (value >> (p1 - key.start))) << ( p1 - bitpos1 ) # print('mask', hex(mask), 'bitsize=', bitsize, hex(bits)) # Determine the byte index: idx = self.begin + j # Clear bits: self.data[idx] &= 0xFF ^ mask # Set bits: self.data[idx] |= bits else: # pragma: no cover raise KeyError()
bsd-2-clause
-2,151,311,591,168,351,700
24.684874
77
0.495501
false
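Among the helpers in the record above, encode_imm32 packs a 32-bit value into the ARM-style immediate form of a 4-bit rotation plus an 8-bit value. The round-trip below is an illustrative standalone reimplementation of that scheme; the function names are invented and it is independent of ppci's own code.

def ror32(value, count):
    """32-bit rotate right."""
    count %= 32
    return ((value >> count) | (value << (32 - count))) & 0xFFFFFFFF

def encode_arm_imm(value):
    """Find a rotation that fits the value in 8 bits; pack as rotation << 8 | imm8."""
    for rotation in range(16):
        imm8 = ror32(value, (32 - 2 * rotation) % 32)  # i.e. rotate left by 2 * rotation
        if imm8 < 0x100:
            return (rotation << 8) | imm8
    raise ValueError("cannot encode %#x as an ARM immediate" % value)

def decode_arm_imm(encoded):
    """Inverse: rotate the low 8 bits right by twice the rotation field."""
    return ror32(encoded & 0xFF, ((encoded >> 8) & 0xF) * 2)

for sample in (0xFF, 0x3F0, 0xAB000000, 0xC0):
    packed = encode_arm_imm(sample)
    assert decode_arm_imm(packed) == sample
    print("%#010x -> %#05x" % (sample, packed))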
lancekrogers/music-network
cleff/cleff/settings.py
1
4462
""" Django settings for cleff project. Generated by 'django-admin startproject' using Django 1.8.3. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'l^*dx&83(z(mxp5l7_8h8v4z3%vm9^#3&69li987h16v&qe^4x' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] AUTH_PROFILE_MODULE = "profiles.models.ProfileModel" # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework', 'rest_framework.authtoken', 'Forum', 'profiles', 'cleff_main', 'stickyuploads', 'messaging', 'geoposition', 'haystack', 'widget_tweaks', # 'social.apps.django_app.default', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'cleff.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', # 'social.apps.django_app.context_processors.backends', # 'social.apps.django_app.context_processors.login_redirect', ], }, }, ] WSGI_APPLICATION = 'cleff.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'cleff', # 'db_name', 'USER': 'cleff', # 'db_user', } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATICFILES_DIRS = ( os.path.join(BASE_DIR, "static"), ) # STATIC_ROOT = os.path.join(BASE_DIR, 'static') STATIC_URL = '/static/' MEDIA_ROOT = os.path.join(BASE_DIR, "static/media") # /Users/lancerogers/Developer/music-network/music-network/cleff/static/media" MEDIA_URL = '/media/' STATIC_ROOT = os.path.join(BASE_DIR, '/static/') #STATIC_URL = '/static/' #EDIA_ROOT = os.path.join(BASE_DIR, # "static/media") # /Users/lancerogers/Developer/music-network/music-network/cleff/static/media" #MEDIA_URL = '/media/' # AUTHENTICATION_BACKENDS = ( # 'social.backends.facebook.FacebookOAuth2', # 'social.backends.google.GoogleOAuth2', # 'social.backends.twitter.TwitterOAuth', # 'django.contrib.auth.backends.ModelBackend', # ) TEMPLATE_CONTEXT_PROCESSORS = ( 'django.core.context_processors.media', ) 
HAYSTACK_CONNECTIONS = { 'default': { 'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine', 'URL': 'http://127.0.0.1:9200/', # 'URL': 'http://192.168.1.82:9200/', 'INDEX_NAME': 'haystack', 'INCLUDE_SPELLING': 'False', }, } HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor' ''' GEOPOSITION_MAP_OPTIONS = { 'minZoom': 3, 'maxZoom': 8, } GEOPOSITION_MARKER_OPTIONS = { 'cursor': 'move' } '''
apache-2.0
-182,834,558,739,971,970
24.352273
122
0.665397
false
DataDog/integrations-core
datadog_checks_base/datadog_checks/base/checks/win/winpdh.py
1
11346
# (C) Datadog, Inc. 2018-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) import time from collections import defaultdict import win32pdh from six import iteritems, text_type from six.moves import winreg DATA_TYPE_INT = win32pdh.PDH_FMT_LONG DATA_TYPE_DOUBLE = win32pdh.PDH_FMT_DOUBLE DATA_POINT_INTERVAL = 0.10 SINGLE_INSTANCE_KEY = "__single_instance" class WinPDHCounter(object): # store the dictionary of pdh counter names pdh_counter_dict = defaultdict(list) _use_en_counter_names = False def __init__(self, en_class_name, en_counter_name, log, instance_name=None, machine_name=None, precision=None): self.counterdict = {} self.logger = log self._counter_name = en_counter_name self._en_class_name = en_class_name self._instance_name = instance_name self._machine_name = machine_name self._is_single_instance = False if precision is None: self._precision = win32pdh.PDH_FMT_DOUBLE else: self._precision = precision class_name_index_list = [] try: self._get_counter_dictionary() class_name_index_list = WinPDHCounter.pdh_counter_dict[en_class_name] except WindowsError: WinPDHCounter._use_en_counter_names = True self.logger.warning("Unable to get counter translations; attempting default English names") except Exception as e: self.logger.error("Exception loading counter strings %s", str(e)) raise if WinPDHCounter._use_en_counter_names: self._class_name = en_class_name else: if len(class_name_index_list) == 0: self.logger.warning("Class %s was not in counter name list, attempting english counter", en_class_name) self._class_name = en_class_name else: if len(class_name_index_list) > 1: self.logger.warning( "Class %s had multiple (%d) indices, using first", en_class_name, len(class_name_index_list) ) self._class_name = win32pdh.LookupPerfNameByIndex(None, int(class_name_index_list[0])) self.hq = win32pdh.OpenQuery() self.collect_counters() if len(self.counterdict) == 0: raise AttributeError("No valid counters to report") def __del__(self): try: win32pdh.CloseQuery(self.hq) except AttributeError: # An error occurred during instantiation before a query was opened. pass def is_single_instance(self): return self._is_single_instance @property def class_name(self): """Returns the counter class name. The value is localized to the system but falls back to the english class name if the counter translations can't be fetched.""" return self._class_name @property def english_class_name(self): """Always return the english version of the counter class name.""" return self._en_class_name def get_single_value(self): if not self.is_single_instance(): raise ValueError('counter is not single instance %s %s' % (self.class_name, self._counter_name)) vals = self.get_all_values() return vals[SINGLE_INSTANCE_KEY] def get_all_values(self): ret = {} # self will retrieve the list of all object names in the class (i.e. all the network interface # names in the class "network interface" win32pdh.CollectQueryData(self.hq) for inst, counter_handle in iteritems(self.counterdict): try: t, val = win32pdh.GetFormattedCounterValue(counter_handle, self._precision) ret[inst] = val except Exception: # exception usually means self type needs two data points to calculate. 
Wait # a bit and try again time.sleep(DATA_POINT_INTERVAL) win32pdh.CollectQueryData(self.hq) # if we get exception self time, just return it up t, val = win32pdh.GetFormattedCounterValue(counter_handle, self._precision) ret[inst] = val return ret def _get_counter_dictionary(self): if WinPDHCounter.pdh_counter_dict: # already populated return if WinPDHCounter._use_en_counter_names: # already found out the registry isn't there return try: val, t = winreg.QueryValueEx(winreg.HKEY_PERFORMANCE_DATA, "Counter 009") except: # noqa: E722, B001 self.logger.error("Windows error; performance counters not found in registry") self.logger.error("Performance counters may need to be rebuilt.") raise # val is an array of strings. The underlying win32 API returns a list of strings # which is the counter name, counter index, counter name, counter index (in windows, # a multi-string value) # # the python implementation translates the multi-string value into an array of strings. # the array of strings then becomes # array[0] = counter_index_1 # array[1] = counter_name_1 # array[2] = counter_index_2 # array[3] = counter_name_2 # # see https://support.microsoft.com/en-us/help/287159/using-pdh-apis-correctly-in-a-localized-language # for more detail # create a table of the keys to the counter index, because we want to look up # by counter name. Some systems may have an odd number of entries, don't # accidentaly index at val[len(val] for idx in range(0, len(val) - 1, 2): WinPDHCounter.pdh_counter_dict[val[idx + 1]].append(val[idx]) def _make_counter_path(self, machine_name, en_counter_name, instance_name, counters): """ When handling non english versions, the counters don't work quite as documented. This is because strings like "Bytes Sent/sec" might appear multiple times in the english master, and might not have mappings for each index. Search each index, and make sure the requested counter name actually appears in the list of available counters; that's the counter we'll use. """ path = "" if WinPDHCounter._use_en_counter_names: """ In this case, we don't have any translations. Just attempt to make the counter path """ try: path = win32pdh.MakeCounterPath( (machine_name, self.class_name, instance_name, None, 0, en_counter_name) ) self.logger.debug("Successfully created English-only path") except Exception as e: # noqa: E722, B001 self.logger.warning("Unable to create English-only path %s", e) raise return path counter_name_index_list = WinPDHCounter.pdh_counter_dict[en_counter_name] for index in counter_name_index_list: c = win32pdh.LookupPerfNameByIndex(None, int(index)) if c is None or len(c) == 0: self.logger.debug("Index %s not found, skipping", index) continue # check to see if this counter is in the list of counters for this class if c not in counters: try: self.logger.debug("Index %s counter %s not in counter list", index, text_type(c)) except: # noqa: E722, B001 # some unicode characters are not translatable here. 
Don't fail just # because we couldn't log self.logger.debug("Index %s not in counter list", index) continue # see if we can create a counter try: path = win32pdh.MakeCounterPath((machine_name, self.class_name, instance_name, None, 0, c)) break except: # noqa: E722, B001 try: self.logger.info("Unable to make path with counter %s, trying next available", text_type(c)) except: # noqa: E722, B001 self.logger.info("Unable to make path with counter index %s, trying next available", index) return path def collect_counters(self): counters, instances = win32pdh.EnumObjectItems( None, self._machine_name, self.class_name, win32pdh.PERF_DETAIL_WIZARD ) if self._instance_name is None and len(instances) > 0: all_instances = set() for inst in instances: path = self._make_counter_path(self._machine_name, self._counter_name, inst, counters) if not path: continue all_instances.add(inst) try: if inst not in self.counterdict: self.logger.debug('Adding instance `%s`', inst) self.counterdict[inst] = win32pdh.AddCounter(self.hq, path) except: # noqa: E722, B001 self.logger.fatal( "Failed to create counter. No instances of %s\\%s" % (self.class_name, self._counter_name) ) expired_instances = set(self.counterdict) - all_instances for inst in expired_instances: self.logger.debug('Removing expired instance `%s`', inst) del self.counterdict[inst] else: if self._instance_name is not None: # check to see that it's valid if len(instances) <= 0: self.logger.error( "%s doesn't seem to be a multi-instance counter, but asked for specific instance %s", self.class_name, self._instance_name, ) raise AttributeError("%s is not a multi-instance counter" % self.class_name) if self._instance_name not in instances: self.logger.error("%s is not a counter instance in %s", self._instance_name, self.class_name) raise AttributeError("%s is not an instance of %s" % (self._instance_name, self.class_name)) path = self._make_counter_path(self._machine_name, self._counter_name, self._instance_name, counters) if not path: self.logger.warning("Empty path returned") elif win32pdh.ValidatePath(path) != 0: # Multi-instance counter with no instances presently pass else: try: if SINGLE_INSTANCE_KEY not in self.counterdict: self.logger.debug('Adding single instance for path `%s`', path) self.counterdict[SINGLE_INSTANCE_KEY] = win32pdh.AddCounter(self.hq, path) except: # noqa: E722, B001 self.logger.fatal( "Failed to create counter. No instances of %s\\%s" % (self.class_name, self._counter_name) ) raise self._is_single_instance = True
bsd-3-clause
7,040,604,954,250,838,000
42.638462
119
0.581438
false
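The registry comment in _get_counter_dictionary above describes the "Counter 009" value as a flat list that alternates counter index and counter name, which the class folds into a name-to-indices table. The sketch below shows only that fold on an invented excerpt; it does not touch the registry or the PDH API.

from collections import defaultdict

def build_counter_table(flat_entries):
    """Pair up (index, name) items from a flattened 'Counter 009'-style list."""
    table = defaultdict(list)
    # Stop at len - 1 so a trailing unpaired entry is ignored, as in the record above.
    for i in range(0, len(flat_entries) - 1, 2):
        index, name = flat_entries[i], flat_entries[i + 1]
        table[name].append(index)
    return table

sample = ["4", "Memory", "6", "% Processor Time", "238", "Processor"]  # invented excerpt
print(build_counter_table(sample)["% Processor Time"])  # -> ['6']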
readw/210CT-Coursework
Basic-Py/6-Reverse.py
1
3321
# Week 3 - 6) Write the pseudocode and code for a function that reverses the
# words in a sentence. Input: "This is awesome" Output: "awesome is This".
# Give the Big O notation.

'''
PSEUDOCODE - ITERATIVE
----------------------

REVERSE_ORDER(s)
    rev <- SPLIT s BY " "
    reversed <- ""
    FOR i IN COUNTDOWN length of rev TO 0
        IF i != 0
            reversed += rev[i]+" "
        ELSE
            reversed += rev[i]
    RETURN reversed

PSEUDOCODE - RECURSIVE
----------------------

REVERSE_ORDER(s, length)
    IF length = 0
        RETURN s[0]
    ELSE
        RETURN REVERSE_ORDER(s[0-length:], length-1) & " " & s[0]
'''

#######################
## Iterative Version ##
#######################

def reverseOrderIter(string):
    '''Iterative solution that reverses the order of all separate words
       within a passed string.'''
    # Example: n=3
    # Set reversed string as an empty string.
    reversedString = ""                                                # O(1) --> O(1)
    # Loop through the list in reverse order.
    for i in range(len(string)-1,-1,-1):                               # O(n) --> O(3)
        # If it isn't the last value in the array.
        if i != 0:                                                     # O(n) --> O(3)
            # Append reversedString with the value and a space.
            reversedString += string[i]+" "                            # O(n) --> O(3)
        # If it is the last value in the array.
        else:                                                          # O(n) --> O(3)
            # Append reversedString with the word.
            reversedString += string[i]                                # O(n) --> O(3)
    return reversedString                                              # O(1) --> O(1)

#######################
## Recursive Version ##
#######################

def reverseOrderRec(sentence, length):
    '''Recursive solution that reverses the order of all separate words
       within a passed string.'''
    # Example: n=3
    # If the length of the string array is 0.
    if length == 0:                                                    # O(1) --> O(1)
        # Return the selected value.
        return sentence[0]                                             # O(1) --> O(1)
    else:                                                              # O(1) --> O(1)
        # Call the function passing the array back and taking 1 from the length.
        return reverseOrderRec(sentence[0-length:], length-1)+" "+sentence[0]    # O(n) --> O(3)

if __name__ == "__main__":
    while True:
        try:
            # User inputs a phrase and it is created into a list.
            sentence = input("Please enter a sentence: ").split(" ")
            # Calls iterative reverse order function.
            print("Result (Iterative): "+reverseOrderIter(sentence))
            # Calls recursive reverse order function.
            print("Result (Recursive): "+reverseOrderRec(sentence,len(sentence)-1))
        except:
            break
gpl-3.0
7,979,030,324,996,267,000
44.493151
100
0.428184
false
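For comparison with the coursework file above, the same word reversal can be written with Python built-ins. This is an editorial aside rather than part of the assignment, and it is still linear in the number of words.

def reverse_words(sentence):
    """Reverse word order with split/reversed/join; O(n) in the number of words."""
    return " ".join(reversed(sentence.split()))

print(reverse_words("This is awesome"))  # -> awesome is This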
PaulWay/insights-core
insights/core/archives.py
1
4729
#!/usr/bin/env python import logging import os import shlex import subprocess import tempfile from insights.core.marshalling import Marshaller from insights.util import subproc, fs try: import insights.contrib.magic as magic except Exception: raise ImportError("You need to install the 'file' RPM.") else: _magic = magic.open(magic.MIME_TYPE) _magic.load() _magic_inner = magic.open(magic.MIME_TYPE | magic.MAGIC_COMPRESS) _magic_inner.load() logger = logging.getLogger(__name__) marshaller = Marshaller() class InvalidArchive(Exception): def __init__(self, msg): super(InvalidArchive, self).__init__(msg) self.msg = msg class InvalidContentType(InvalidArchive): def __init__(self, content_type): self.msg = 'Invalid content type: "%s"' % content_type super(InvalidContentType, self).__init__(self.msg) self.content_type = content_type class Extractor(object): """ Abstract base class for extraction of archives into usable objects. """ def __init__(self, timeout=150): self.tmp_dir = None self.timeout = timeout def from_buffer(self, buf): pass def from_path(self, path): pass def getnames(self): return self.tar_file.getnames() def extractfile(self, name): return self.tar_file.extractfile(name) def cleanup(self): if self.tmp_dir: fs.remove(self.tmp_dir, chmod=True) def issym(self, name): return self.tar_file.issym(name) def isdir(self, name): return self.tar_file.isdir(name) def __enter__(self): return self def __exit__(self, a, b, c): self.cleanup() class ZipExtractor(Extractor): def from_buffer(self, buf): with tempfile.NamedTemporaryFile() as tf: tf.write(buf) tf.flush() return self.from_path(tf.name) def from_path(self, path): self.tmp_dir = tempfile.mkdtemp() command = "unzip %s -d %s" % (path, self.tmp_dir) subprocess.call(shlex.split(command)) self.tar_file = DirectoryAdapter(self.tmp_dir) return self class TarExtractor(Extractor): TAR_FLAGS = { "application/x-xz": "-J", "application/x-gzip": "-z", "application/gzip": "-z", "application/x-bzip2": "-j", "application/x-tar": "" } def _assert_type(self, _input, is_buffer=False): method = 'buffer' if is_buffer else 'file' self.content_type = getattr(_magic, method)(_input) if self.content_type not in self.TAR_FLAGS: raise InvalidContentType(self.content_type) inner_type = getattr(_magic_inner, method)(_input) if inner_type != 'application/x-tar': raise InvalidArchive('No compressed tar archive') def from_buffer(self, buf): self._assert_type(buf, True) tar_flag = self.TAR_FLAGS.get(self.content_type) self.tmp_dir = tempfile.mkdtemp() command = "tar %s -x -f - -C %s" % (tar_flag, self.tmp_dir) p = subprocess.Popen(shlex.split(command), stdin=subprocess.PIPE) p.stdin.write(buf) p.stdin.close() p.communicate() self.tar_file = DirectoryAdapter(self.tmp_dir) return self def from_path(self, path, extract_dir=None): if os.path.isdir(path): self.tar_file = DirectoryAdapter(path) else: self._assert_type(path, False) tar_flag = self.TAR_FLAGS.get(self.content_type) self.tmp_dir = tempfile.mkdtemp(dir=extract_dir) command = "tar %s -x --exclude=*/dev/null -f %s -C %s" % (tar_flag, path, self.tmp_dir) logging.info("Extracting files in '%s'", self.tmp_dir) subproc.call(command, timeout=self.timeout) self.tar_file = DirectoryAdapter(self.tmp_dir) return self class DirectoryAdapter(object): """ This class takes a path to a directory and provides a subset of the methods that a tarfile object provides. 
""" def __init__(self, path): self.path = path self.names = [] for root, dirs, files in os.walk(self.path): for dirname in dirs: self.names.append(os.path.join(root, dirname) + "/") for filename in files: self.names.append(os.path.join(root, filename)) def getnames(self): return self.names def extractfile(self, name): with open(name, "rb") as fp: return fp.read() def issym(self, name): return os.path.islink(name) def isdir(self, name): return os.path.isdir(name) def close(self): pass
apache-2.0
1,995,818,427,947,127,800
26.817647
99
0.601819
false
xuru/pyvisdk
pyvisdk/do/perf_metric_series_csv.py
1
1028
import logging from pyvisdk.exceptions import InvalidArgumentError ######################################## # Automatically generated, do not edit. ######################################## log = logging.getLogger(__name__) def PerfMetricSeriesCSV(vim, *args, **kwargs): '''This data object type represents a PerfMetricSeries encoded in CSV format.''' obj = vim.client.factory.create('ns0:PerfMetricSeriesCSV') # do some validation checking... if (len(args) + len(kwargs)) < 1: raise IndexError('Expected at least 2 arguments got: %d' % len(args)) required = [ 'id' ] optional = [ 'value', 'dynamicProperty', 'dynamicType' ] for name, arg in zip(required+optional, args): setattr(obj, name, arg) for name, value in kwargs.items(): if name in required + optional: setattr(obj, name, value) else: raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional))) return obj
mit
-1,112,465,584,048,227,200
30.181818
124
0.597276
false
bundlewrap/bundlewrap
bundlewrap/items/svc_openrc.py
1
3360
from shlex import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import mark_for_translation as _ def svc_start(node, svcname): return node.run(f"rc-service {quote(svcname)} start", may_fail=True) def svc_running(node, svcname): result = node.run(f"rc-service {quote(svcname)} status", may_fail=True) return result.return_code == 0 and "started" in result.stdout_text def svc_stop(node, svcname): return node.run(f"rc-service {quote(svcname)} stop", may_fail=True) def svc_enable(node, svcname): return node.run(f"rc-update add {quote(svcname)}", may_fail=True) def svc_enabled(node, svcname): result = node.run( f"rc-update show default | grep -w {quote(svcname)}", may_fail=True ) return result.return_code == 0 and svcname in result.stdout_text def svc_disable(node, svcname): return node.run(f"rc-update del {quote(svcname)}", may_fail=True) class SvcOpenRC(Item): """ A service managed by OpenRC init scripts. """ BUNDLE_ATTRIBUTE_NAME = "svc_openrc" ITEM_ATTRIBUTES = { "running": True, "enabled": True, } ITEM_TYPE_NAME = "svc_openrc" def __repr__(self): return "<SvcOpenRC name:{} enabled:{} running:{}>".format( self.name, self.attributes["enabled"], self.attributes["running"], ) def fix(self, status): if "enabled" in status.keys_to_fix: if self.attributes["enabled"]: svc_enable(self.node, self.name) else: svc_disable(self.node, self.name) if "running" in status.keys_to_fix: if self.attributes["running"]: svc_start(self.node, self.name) else: svc_stop(self.node, self.name) def get_canned_actions(self): return { "stop": { "command": f"rc-service {self.name} stop", "needed_by": {self.id}, }, "restart": { "command": f"rc-service {self.name} restart", "needs": {self.id}, }, "reload": { "command": f"rc-service {self.name} reload".format(self.name), "needs": { # make sure we don't reload and restart simultaneously f"{self.id}:restart", # with only the dep on restart, we might still end # up reloading if the service itself is skipped # because the restart action has cascade_skip False self.id, }, }, } def sdict(self): return { "enabled": svc_enabled(self.node, self.name), "running": svc_running(self.node, self.name), } @classmethod def validate_attributes(cls, bundle, item_id, attributes): for attribute in ("enabled", "running"): if attributes.get(attribute, None) not in (True, False, None): raise BundleError( _( "expected boolean or None for '{attribute}' on {item} in bundle '{bundle}'" ).format( attribute=attribute, bundle=bundle.name, item=item_id, ) )
gpl-3.0
7,016,225,537,355,085,000
31
99
0.54881
false
sekikn/ambari
ambari-server/src/main/python/ambari_server/setupActions.py
2
1913
#!/usr/bin/env python ''' Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' # action commands SETUP_ACTION = "setup" START_ACTION = "start" PSTART_ACTION = "pstart" STOP_ACTION = "stop" RESTART_ACTION = "restart" RESET_ACTION = "reset" UPGRADE_ACTION = "upgrade" REFRESH_STACK_HASH_ACTION = "refresh-stack-hash" STATUS_ACTION = "status" SETUP_HTTPS_ACTION = "setup-https" SETUP_JDBC_ACTION = "setup-jdbc" LDAP_SETUP_ACTION = "setup-ldap" SETUP_SSO_ACTION = "setup-sso" LDAP_SYNC_ACTION = "sync-ldap" SET_CURRENT_ACTION = "set-current" SETUP_GANGLIA_HTTPS_ACTION = "setup-ganglia-https" ENCRYPT_PASSWORDS_ACTION = "encrypt-passwords" SETUP_SECURITY_ACTION = "setup-security" UPDATE_HOST_NAMES_ACTION = "update-host-names" CHECK_DATABASE_ACTION = "check-database" BACKUP_ACTION = "backup" RESTORE_ACTION = "restore" SETUP_JCE_ACTION = "setup-jce" ENABLE_STACK_ACTION = "enable-stack" DB_PURGE_ACTION = "db-purge-history" INSTALL_MPACK_ACTION = "install-mpack" UNINSTALL_MPACK_ACTION = "uninstall-mpack" UPGRADE_MPACK_ACTION = "upgrade-mpack" PAM_SETUP_ACTION = "setup-pam" MIGRATE_LDAP_PAM_ACTION = "migrate-ldap-pam" KERBEROS_SETUP_ACTION = "setup-kerberos" SETUP_TPROXY_ACTION = "setup-trusted-proxy"
apache-2.0
5,927,159,286,697,128,000
35.09434
72
0.765813
false
jasonzio/azure-linux-extensions
VMEncryption/main/DiskUtil.py
1
39985
#!/usr/bin/env python # # VMEncryption extension # # Copyright 2015 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import subprocess import json import os import os.path import re import shlex import sys from subprocess import * import shutil import traceback import uuid import glob from EncryptionConfig import EncryptionConfig from DecryptionMarkConfig import DecryptionMarkConfig from EncryptionMarkConfig import EncryptionMarkConfig from TransactionalCopyTask import TransactionalCopyTask from CommandExecutor import * from Common import * class DiskUtil(object): def __init__(self, hutil, patching, logger, encryption_environment): self.encryption_environment = encryption_environment self.hutil = hutil self.distro_patcher = patching self.logger = logger self.ide_class_id = "{32412632-86cb-44a2-9b5c-50d1417354f5}" self.vmbus_sys_path = '/sys/bus/vmbus/devices' self.command_executor = CommandExecutor(self.logger) def copy(self, ongoing_item_config, status_prefix=''): copy_task = TransactionalCopyTask(logger=self.logger, disk_util=self, hutil=self.hutil, ongoing_item_config=ongoing_item_config, patching=self.distro_patcher, encryption_environment=self.encryption_environment, status_prefix=status_prefix) try: mem_fs_result = copy_task.prepare_mem_fs() if mem_fs_result != CommonVariables.process_success: return CommonVariables.tmpfs_error else: return copy_task.begin_copy() except Exception as e: message = "Failed to perform dd copy: {0}, stack trace: {1}".format(e, traceback.format_exc()) self.logger.log(msg=message, level=CommonVariables.ErrorLevel) finally: copy_task.clear_mem_fs() def format_disk(self, dev_path, file_system): mkfs_command = "" if file_system == "ext4": mkfs_command = "mkfs.ext4" elif file_system == "ext3": mkfs_command = "mkfs.ext3" elif file_system == "xfs": mkfs_command = "mkfs.xfs" elif file_system == "btrfs": mkfs_command = "mkfs.btrfs" mkfs_cmd = "{0} {1}".format(mkfs_command, dev_path) return self.command_executor.Execute(mkfs_cmd) def make_sure_path_exists(self, path): mkdir_cmd = self.distro_patcher.mkdir_path + ' -p ' + path self.logger.log("make sure path exists, executing: {0}".format(mkdir_cmd)) return self.command_executor.Execute(mkdir_cmd) def touch_file(self, path): mkdir_cmd = self.distro_patcher.touch_path + ' ' + path self.logger.log("touching file, executing: {0}".format(mkdir_cmd)) return self.command_executor.Execute(mkdir_cmd) def get_crypt_items(self): crypt_items = [] rootfs_crypt_item_found = False if not os.path.exists(self.encryption_environment.azure_crypt_mount_config_path): self.logger.log("{0} does not exist".format(self.encryption_environment.azure_crypt_mount_config_path)) else: with open(self.encryption_environment.azure_crypt_mount_config_path,'r') as f: for line in f: if not line.strip(): continue crypt_mount_item_properties = line.strip().split() crypt_item = CryptItem() crypt_item.mapper_name = crypt_mount_item_properties[0] crypt_item.dev_path = crypt_mount_item_properties[1] header_file_path = None if 
crypt_mount_item_properties[2] and crypt_mount_item_properties[2] != "None": header_file_path = crypt_mount_item_properties[2] crypt_item.luks_header_path = header_file_path crypt_item.mount_point = crypt_mount_item_properties[3] if crypt_item.mount_point == "/": rootfs_crypt_item_found = True crypt_item.file_system = crypt_mount_item_properties[4] crypt_item.uses_cleartext_key = True if crypt_mount_item_properties[5] == "True" else False try: crypt_item.current_luks_slot = int(crypt_mount_item_properties[6]) except IndexError: crypt_item.current_luks_slot = -1 crypt_items.append(crypt_item) encryption_status = json.loads(self.get_encryption_status()) if encryption_status["os"] == "Encrypted" and not rootfs_crypt_item_found: crypt_item = CryptItem() crypt_item.mapper_name = "osencrypt" proc_comm = ProcessCommunicator() grep_result = self.command_executor.ExecuteInBash("cryptsetup status osencrypt | grep device:", communicator=proc_comm) if grep_result == 0: crypt_item.dev_path = proc_comm.stdout.strip().split()[1] else: proc_comm = ProcessCommunicator() self.command_executor.Execute("dmsetup table --target crypt", communicator=proc_comm) for line in proc_comm.stdout.splitlines(): if 'osencrypt' in line: majmin = filter(lambda p: re.match(r'\d+:\d+', p), line.split())[0] src_device = filter(lambda d: d.majmin == majmin, self.get_device_items(None))[0] crypt_item.dev_path = '/dev/' + src_device.name break rootfs_dev = next((m for m in self.get_mount_items() if m["dest"] == "/")) crypt_item.file_system = rootfs_dev["fs"] if not crypt_item.dev_path: raise Exception("Could not locate block device for rootfs") crypt_item.luks_header_path = "/boot/luks/osluksheader" if not os.path.exists(crypt_item.luks_header_path): crypt_item.luks_header_path = crypt_item.dev_path crypt_item.mount_point = "/" crypt_item.uses_cleartext_key = False crypt_item.current_luks_slot = -1 crypt_items.append(crypt_item) return crypt_items def add_crypt_item(self, crypt_item): """ TODO we should judge that the second time. 
format is like this: <target name> <source device> <key file> <options> """ try: if not crypt_item.luks_header_path: crypt_item.luks_header_path = "None" mount_content_item = (crypt_item.mapper_name + " " + crypt_item.dev_path + " " + crypt_item.luks_header_path + " " + crypt_item.mount_point + " " + crypt_item.file_system + " " + str(crypt_item.uses_cleartext_key) + " " + str(crypt_item.current_luks_slot)) if os.path.exists(self.encryption_environment.azure_crypt_mount_config_path): with open(self.encryption_environment.azure_crypt_mount_config_path,'r') as f: existing_content = f.read() if existing_content is not None and existing_content.strip() != "": new_mount_content = existing_content + "\n" + mount_content_item else: new_mount_content = mount_content_item else: new_mount_content = mount_content_item with open(self.encryption_environment.azure_crypt_mount_config_path,'w') as wf: wf.write('\n') wf.write(new_mount_content) wf.write('\n') return True except Exception as e: return False def remove_crypt_item(self, crypt_item): if not os.path.exists(self.encryption_environment.azure_crypt_mount_config_path): return False try: mount_lines = [] with open(self.encryption_environment.azure_crypt_mount_config_path, 'r') as f: mount_lines = f.readlines() filtered_mount_lines = filter(lambda line: not crypt_item.mapper_name in line, mount_lines) with open(self.encryption_environment.azure_crypt_mount_config_path, 'w') as wf: wf.write('\n') wf.write('\n'.join(filtered_mount_lines)) wf.write('\n') return True except Exception as e: return False def update_crypt_item(self, crypt_item): self.logger.log("Updating entry for crypt item {0}".format(crypt_item)) self.remove_crypt_item(crypt_item) self.add_crypt_item(crypt_item) def create_luks_header(self, mapper_name): luks_header_file_path = self.encryption_environment.luks_header_base_path + mapper_name if not os.path.exists(luks_header_file_path): dd_command = self.distro_patcher.dd_path + ' if=/dev/zero bs=33554432 count=1 > ' + luks_header_file_path self.command_executor.ExecuteInBash(dd_command, raise_exception_on_failure=True) return luks_header_file_path def create_cleartext_key(self, mapper_name): cleartext_key_file_path = self.encryption_environment.cleartext_key_base_path + mapper_name if not os.path.exists(cleartext_key_file_path): dd_command = self.distro_patcher.dd_path + ' if=/dev/urandom bs=128 count=1 > ' + cleartext_key_file_path self.command_executor.ExecuteInBash(dd_command, raise_exception_on_failure=True) return cleartext_key_file_path def encrypt_disk(self, dev_path, passphrase_file, mapper_name, header_file): return_code = self.luks_format(passphrase_file=passphrase_file, dev_path=dev_path, header_file=header_file) if return_code != CommonVariables.process_success: self.logger.log(msg=('cryptsetup luksFormat failed, return_code is:{0}'.format(return_code)), level=CommonVariables.ErrorLevel) return return_code else: return_code = self.luks_open(passphrase_file=passphrase_file, dev_path=dev_path, mapper_name=mapper_name, header_file=header_file, uses_cleartext_key=False) if return_code != CommonVariables.process_success: self.logger.log(msg=('cryptsetup luksOpen failed, return_code is:{0}'.format(return_code)), level=CommonVariables.ErrorLevel) return return_code def check_fs(self, dev_path): self.logger.log("checking fs:" + str(dev_path)) check_fs_cmd = self.distro_patcher.e2fsck_path + " -f -y " + dev_path return self.command_executor.Execute(check_fs_cmd) def expand_fs(self, dev_path): expandfs_cmd = 
self.distro_patcher.resize2fs_path + " " + str(dev_path) return self.command_executor.Execute(expandfs_cmd) def shrink_fs(self, dev_path, size_shrink_to): """ size_shrink_to is in sector (512 byte) """ shrinkfs_cmd = self.distro_patcher.resize2fs_path + ' ' + str(dev_path) + ' ' + str(size_shrink_to) + 's' return self.command_executor.Execute(shrinkfs_cmd) def check_shrink_fs(self, dev_path, size_shrink_to): return_code = self.check_fs(dev_path) if return_code == CommonVariables.process_success: return_code = self.shrink_fs(dev_path = dev_path, size_shrink_to = size_shrink_to) return return_code else: return return_code def luks_format(self, passphrase_file, dev_path, header_file): """ return the return code of the process for error handling. """ self.hutil.log("dev path to cryptsetup luksFormat {0}".format(dev_path)) #walkaround for sles sp3 if self.distro_patcher.distro_info[0].lower() == 'suse' and self.distro_patcher.distro_info[1] == '11': proc_comm = ProcessCommunicator() passphrase_cmd = self.distro_patcher.cat_path + ' ' + passphrase_file self.command_executor.Execute(passphrase_cmd, communicator=proc_comm) passphrase = proc_comm.stdout cryptsetup_cmd = "{0} luksFormat {1} -q".format(self.distro_patcher.cryptsetup_path, dev_path) return self.command_executor.Execute(cryptsetup_cmd, input=passphrase) else: if header_file is not None: cryptsetup_cmd = "{0} luksFormat {1} --header {2} -d {3} -q".format(self.distro_patcher.cryptsetup_path , dev_path , header_file , passphrase_file) else: cryptsetup_cmd = "{0} luksFormat {1} -d {2} -q".format(self.distro_patcher.cryptsetup_path , dev_path , passphrase_file) return self.command_executor.Execute(cryptsetup_cmd) def luks_add_key(self, passphrase_file, dev_path, mapper_name, header_file, new_key_path): """ return the return code of the process for error handling. """ self.hutil.log("new key path: " + (new_key_path)) if not os.path.exists(new_key_path): self.hutil.error("new key does not exist") return None if header_file: cryptsetup_cmd = "{0} luksAddKey {1} {2} -d {3} -q".format(self.distro_patcher.cryptsetup_path, header_file, new_key_path, passphrase_file) else: cryptsetup_cmd = "{0} luksAddKey {1} {2} -d {3} -q".format(self.distro_patcher.cryptsetup_path, dev_path, new_key_path, passphrase_file) return self.command_executor.Execute(cryptsetup_cmd) def luks_remove_key(self, passphrase_file, dev_path, header_file): """ return the return code of the process for error handling. """ self.hutil.log("removing keyslot: {0}".format(passphrase_file)) if header_file: cryptsetup_cmd = "{0} luksRemoveKey {1} -d {2} -q".format(self.distro_patcher.cryptsetup_path, header_file, passphrase_file) else: cryptsetup_cmd = "{0} luksRemoveKey {1} -d {2} -q".format(self.distro_patcher.cryptsetup_path, dev_path, passphrase_file) return self.command_executor.Execute(cryptsetup_cmd) def luks_kill_slot(self, passphrase_file, dev_path, header_file, keyslot): """ return the return code of the process for error handling. """ self.hutil.log("killing keyslot: {0}".format(keyslot)) if header_file: cryptsetup_cmd = "{0} luksKillSlot {1} {2} -d {3} -q".format(self.distro_patcher.cryptsetup_path, header_file, keyslot, passphrase_file) else: cryptsetup_cmd = "{0} luksKillSlot {1} {2} -d {3} -q".format(self.distro_patcher.cryptsetup_path, dev_path, keyslot, passphrase_file) return self.command_executor.Execute(cryptsetup_cmd) def luks_add_cleartext_key(self, passphrase_file, dev_path, mapper_name, header_file): """ return the return code of the process for error handling. 
""" cleartext_key_file_path = self.encryption_environment.cleartext_key_base_path + mapper_name self.hutil.log("cleartext key path: " + (cleartext_key_file_path)) return self.luks_add_key(passphrase_file, dev_path, mapper_name, header_file, cleartext_key_file_path) def luks_dump_keyslots(self, dev_path, header_file): cryptsetup_cmd = "" if header_file: cryptsetup_cmd = "{0} luksDump {1}".format(self.distro_patcher.cryptsetup_path, header_file) else: cryptsetup_cmd = "{0} luksDump {1}".format(self.distro_patcher.cryptsetup_path, dev_path) proc_comm = ProcessCommunicator() self.command_executor.Execute(cryptsetup_cmd, communicator=proc_comm) lines = filter(lambda l: "key slot" in l.lower(), proc_comm.stdout.split("\n")) keyslots = map(lambda l: "enabled" in l.lower(), lines) return keyslots def luks_open(self, passphrase_file, dev_path, mapper_name, header_file, uses_cleartext_key): """ return the return code of the process for error handling. """ self.hutil.log("dev mapper name to cryptsetup luksOpen " + (mapper_name)) if uses_cleartext_key: passphrase_file = self.encryption_environment.cleartext_key_base_path + mapper_name self.hutil.log("keyfile: " + (passphrase_file)) if header_file: cryptsetup_cmd = "{0} luksOpen {1} {2} --header {3} -d {4} -q".format(self.distro_patcher.cryptsetup_path , dev_path , mapper_name, header_file , passphrase_file) else: cryptsetup_cmd = "{0} luksOpen {1} {2} -d {3} -q".format(self.distro_patcher.cryptsetup_path , dev_path , mapper_name , passphrase_file) return self.command_executor.Execute(cryptsetup_cmd) def luks_close(self, mapper_name): """ returns the exit code for cryptsetup process. """ self.hutil.log("dev mapper name to cryptsetup luksOpen " + (mapper_name)) cryptsetup_cmd = "{0} luksClose {1} -q".format(self.distro_patcher.cryptsetup_path, mapper_name) return self.command_executor.Execute(cryptsetup_cmd) #TODO error handling. def append_mount_info(self, dev_path, mount_point): shutil.copy2('/etc/fstab', '/etc/fstab.backup.' + str(str(uuid.uuid4()))) mount_content_item = dev_path + " " + mount_point + " auto defaults 0 0" new_mount_content = "" with open("/etc/fstab",'r') as f: existing_content = f.read() new_mount_content = existing_content + "\n" + mount_content_item with open("/etc/fstab",'w') as wf: wf.write(new_mount_content) def remove_mount_info(self, mount_point): if not mount_point: self.logger.log("remove_mount_info: mount_point is empty") return shutil.copy2('/etc/fstab', '/etc/fstab.backup.' + str(str(uuid.uuid4()))) filtered_contents = [] removed_lines = [] with open('/etc/fstab', 'r') as f: for line in f.readlines(): line = line.strip() pattern = '\s' + re.escape(mount_point) + '\s' if re.search(pattern, line): self.logger.log("removing fstab line: {0}".format(line)) removed_lines.append(line) continue filtered_contents.append(line) with open('/etc/fstab', 'w') as f: f.write('\n') f.write('\n'.join(filtered_contents)) f.write('\n') self.logger.log("fstab updated successfully") with open('/etc/fstab.azure.backup', 'a+') as f: f.write('\n') f.write('\n'.join(removed_lines)) f.write('\n') self.logger.log("fstab.azure.backup updated successfully") def restore_mount_info(self, mount_point): if not mount_point: self.logger.log("restore_mount_info: mount_point is empty") return shutil.copy2('/etc/fstab', '/etc/fstab.backup.' 
+ str(str(uuid.uuid4()))) filtered_contents = [] removed_lines = [] with open('/etc/fstab.azure.backup', 'r') as f: for line in f.readlines(): line = line.strip() pattern = '\s' + re.escape(mount_point) + '\s' if re.search(pattern, line): self.logger.log("removing fstab.azure.backup line: {0}".format(line)) removed_lines.append(line) continue filtered_contents.append(line) with open('/etc/fstab.azure.backup', 'w') as f: f.write('\n') f.write('\n'.join(filtered_contents)) f.write('\n') self.logger.log("fstab.azure.backup updated successfully") with open('/etc/fstab', 'a+') as f: f.write('\n') f.write('\n'.join(removed_lines)) f.write('\n') self.logger.log("fstab updated successfully") def mount_filesystem(self, dev_path, mount_point, file_system=None): """ mount the file system. """ self.make_sure_path_exists(mount_point) return_code = -1 if file_system is None: mount_cmd = self.distro_patcher.mount_path + ' ' + dev_path + ' ' + mount_point else: mount_cmd = self.distro_patcher.mount_path + ' ' + dev_path + ' ' + mount_point + ' -t ' + file_system return self.command_executor.Execute(mount_cmd) def mount_crypt_item(self, crypt_item, passphrase): self.logger.log("trying to mount the crypt item:" + str(crypt_item)) mount_filesystem_result = self.mount_filesystem(os.path.join('/dev/mapper', crypt_item.mapper_name), crypt_item.mount_point, crypt_item.file_system) self.logger.log("mount file system result:{0}".format(mount_filesystem_result)) def umount(self, path): umount_cmd = self.distro_patcher.umount_path + ' ' + path return self.command_executor.Execute(umount_cmd) def umount_all_crypt_items(self): for crypt_item in self.get_crypt_items(): self.logger.log("Unmounting {0}".format(crypt_item.mount_point)) self.umount(crypt_item.mount_point) def mount_all(self): mount_all_cmd = self.distro_patcher.mount_path + ' -a' return self.command_executor.Execute(mount_all_cmd) def get_mount_items(self): items = [] for line in file('/proc/mounts'): line = [s.decode('string_escape') for s in line.split()] item = { "src": line[0], "dest": line[1], "fs": line[2] } items.append(item) return items def get_encryption_status(self): encryption_status = { "data": "NotEncrypted", "os": "NotEncrypted" } mount_items = self.get_mount_items() os_drive_encrypted = False data_drives_found = False data_drives_encrypted = True for mount_item in mount_items: if mount_item["fs"] in ["ext2", "ext4", "ext3", "xfs"] and \ not "/mnt" == mount_item["dest"] and \ not "/" == mount_item["dest"] and \ not "/oldroot/mnt/resource" == mount_item["dest"] and \ not "/oldroot/boot" == mount_item["dest"] and \ not "/oldroot" == mount_item["dest"] and \ not "/mnt/resource" == mount_item["dest"] and \ not "/boot" == mount_item["dest"]: data_drives_found = True if not "/dev/mapper" in mount_item["src"]: self.logger.log("Data volume {0} is mounted from {1}".format(mount_item["dest"], mount_item["src"])) data_drives_encrypted = False if mount_item["dest"] == "/" and \ "/dev/mapper" in mount_item["src"] or \ "/dev/dm" in mount_item["src"]: self.logger.log("OS volume {0} is mounted from {1}".format(mount_item["dest"], mount_item["src"])) os_drive_encrypted = True if not data_drives_found: encryption_status["data"] = "NotMounted" elif data_drives_encrypted: encryption_status["data"] = "Encrypted" if os_drive_encrypted: encryption_status["os"] = "Encrypted" encryption_marker = EncryptionMarkConfig(self.logger, self.encryption_environment) decryption_marker = DecryptionMarkConfig(self.logger, self.encryption_environment) if 
decryption_marker.config_file_exists(): encryption_status["data"] = "DecryptionInProgress" elif encryption_marker.config_file_exists(): encryption_config = EncryptionConfig(self.encryption_environment, self.logger) volume_type = encryption_config.get_volume_type().lower() if volume_type == CommonVariables.VolumeTypeData.lower() or \ volume_type == CommonVariables.VolumeTypeAll.lower(): encryption_status["data"] = "EncryptionInProgress" if volume_type == CommonVariables.VolumeTypeOS.lower() or \ volume_type == CommonVariables.VolumeTypeAll.lower(): encryption_status["os"] = "EncryptionInProgress" elif os.path.exists('/dev/mapper/osencrypt') and not os_drive_encrypted: encryption_status["os"] = "VMRestartPending" return json.dumps(encryption_status) def query_dev_sdx_path_by_scsi_id(self, scsi_number): p = Popen([self.distro_patcher.lsscsi_path, scsi_number], stdout=subprocess.PIPE, stderr=subprocess.PIPE) identity, err = p.communicate() # identity sample: [5:0:0:0] disk Msft Virtual Disk 1.0 /dev/sdc self.logger.log("lsscsi output is: {0}\n".format(identity)) vals = identity.split() if vals is None or len(vals) == 0: return None sdx_path = vals[len(vals) - 1] return sdx_path def query_dev_id_path_by_sdx_path(self, sdx_path): """ return /dev/disk/by-id that maps to the sdx_path, otherwise return the original path """ for disk_by_id in os.listdir(CommonVariables.disk_by_id_root): disk_by_id_path = os.path.join(CommonVariables.disk_by_id_root, disk_by_id) if os.path.realpath(disk_by_id_path) == sdx_path: return disk_by_id_path return sdx_path def query_dev_uuid_path_by_sdx_path(self, sdx_path): """ the behaviour is if we could get the uuid, then return, if not, just return the sdx. """ self.logger.log("querying the sdx path of:{0}".format(sdx_path)) #blkid path p = Popen([self.distro_patcher.blkid_path, sdx_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE) identity, err = p.communicate() identity = identity.lower() self.logger.log("blkid output is: \n" + identity) uuid_pattern = 'uuid="' index_of_uuid = identity.find(uuid_pattern) identity = identity[index_of_uuid + len(uuid_pattern):] index_of_quote = identity.find('"') uuid = identity[0:index_of_quote] if uuid.strip() == "": #TODO this is strange? 
BUGBUG return sdx_path return os.path.join("/dev/disk/by-uuid/", uuid) def query_dev_uuid_path_by_scsi_number(self, scsi_number): # find the scsi using the filter # TODO figure out why the disk formated using fdisk do not have uuid sdx_path = self.query_dev_sdx_path_by_scsi_id(scsi_number) return self.query_dev_uuid_path_by_sdx_path(sdx_path) def get_device_items_property(self, dev_name, property_name): self.logger.log("getting property of device {0}".format(dev_name)) device_path = None if os.path.exists("/dev/" + dev_name): device_path = "/dev/" + dev_name elif os.path.exists("/dev/mapper/" + dev_name): device_path = "/dev/mapper/" + dev_name if property_name == "SIZE": get_property_cmd = self.distro_patcher.blockdev_path + " --getsize64 " + device_path proc_comm = ProcessCommunicator() self.command_executor.Execute(get_property_cmd, communicator=proc_comm) return proc_comm.stdout.strip() else: get_property_cmd = self.distro_patcher.lsblk_path + " " + device_path + " -b -nl -o NAME," + property_name proc_comm = ProcessCommunicator() self.command_executor.Execute(get_property_cmd, communicator=proc_comm, raise_exception_on_failure=True) for line in proc_comm.stdout.splitlines(): if line.strip(): disk_info_item_array = line.strip().split() if dev_name == disk_info_item_array[0]: if len(disk_info_item_array) > 1: return disk_info_item_array[1] return def get_device_items_sles(self, dev_path): self.logger.log(msg=("getting the blk info from:{0}".format(dev_path))) device_items_to_return = [] device_items = [] #first get all the device names if dev_path is None: lsblk_command = 'lsblk -b -nl -o NAME' else: lsblk_command = 'lsblk -b -nl -o NAME ' + dev_path proc_comm = ProcessCommunicator() self.command_executor.Execute(lsblk_command, communicator=proc_comm, raise_exception_on_failure=True) for line in proc_comm.stdout.splitlines(): item_value_str = line.strip() if item_value_str: device_item = DeviceItem() device_item.name = item_value_str.split()[0] device_items.append(device_item) for device_item in device_items: device_item.file_system = self.get_device_items_property(dev_name=device_item.name, property_name='FSTYPE') device_item.mount_point = self.get_device_items_property(dev_name=device_item.name, property_name='MOUNTPOINT') device_item.label = self.get_device_items_property(dev_name=device_item.name, property_name='LABEL') device_item.uuid = self.get_device_items_property(dev_name=device_item.name, property_name='UUID') device_item.majmin = self.get_device_items_property(dev_name=device_item.name, property_name='MAJ:MIN') # get the type of device model_file_path = '/sys/block/' + device_item.name + '/device/model' if os.path.exists(model_file_path): with open(model_file_path, 'r') as f: device_item.model = f.read().strip() else: self.logger.log(msg=("no model file found for device {0}".format(device_item.name))) if device_item.model == 'Virtual Disk': self.logger.log(msg="model is virtual disk") device_item.type = 'disk' else: partition_files = glob.glob('/sys/block/*/' + device_item.name + '/partition') self.logger.log(msg="partition files exists") if partition_files is not None and len(partition_files) > 0: device_item.type = 'part' size_string = self.get_device_items_property(dev_name=device_item.name, property_name='SIZE') if size_string is not None and size_string != "": device_item.size = int(size_string) if device_item.size is not None: device_items_to_return.append(device_item) else: self.logger.log(msg=("skip the device {0} because we could not get size of 
it.".format(device_item.name))) return device_items_to_return def get_device_items(self, dev_path): if self.distro_patcher.distro_info[0].lower() == 'suse' and self.distro_patcher.distro_info[1] == '11': return self.get_device_items_sles(dev_path) else: self.logger.log(msg=("getting the blk info from " + str(dev_path))) if dev_path is None: lsblk_command = 'lsblk -b -n -P -o NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL,SIZE,MAJ:MIN' else: lsblk_command = 'lsblk -b -n -P -o NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL,SIZE,MAJ:MIN ' + dev_path proc_comm = ProcessCommunicator() self.command_executor.Execute(lsblk_command, communicator=proc_comm, raise_exception_on_failure=True) device_items = [] for line in proc_comm.stdout.splitlines(): if line: device_item = DeviceItem() for disk_info_property in line.split(): property_item_pair = disk_info_property.split('=') if property_item_pair[0] == 'SIZE': device_item.size = int(property_item_pair[1].strip('"')) if property_item_pair[0] == 'NAME': device_item.name = property_item_pair[1].strip('"') if property_item_pair[0] == 'TYPE': device_item.type = property_item_pair[1].strip('"') if property_item_pair[0] == 'FSTYPE': device_item.file_system = property_item_pair[1].strip('"') if property_item_pair[0] == 'MOUNTPOINT': device_item.mount_point = property_item_pair[1].strip('"') if property_item_pair[0] == 'LABEL': device_item.label = property_item_pair[1].strip('"') if property_item_pair[0] == 'UUID': device_item.uuid = property_item_pair[1].strip('"') if property_item_pair[0] == 'MODEL': device_item.model = property_item_pair[1].strip('"') if property_item_pair[0] == 'MAJ:MIN': device_item.majmin = property_item_pair[1].strip('"') if device_item.type.lower() == 'lvm': for lvm_item in self.get_lvm_items(): majmin = lvm_item.lv_kernel_major + ':' + lvm_item.lv_kernel_minor if majmin == device_item.majmin: device_item.name = lvm_item.vg_name + '/' + lvm_item.lv_name device_items.append(device_item) return device_items def get_lvm_items(self): lvs_command = 'lvs --noheadings --nameprefixes --unquoted -o lv_name,vg_name,lv_kernel_major,lv_kernel_minor' proc_comm = ProcessCommunicator() self.command_executor.Execute(lvs_command, communicator=proc_comm, raise_exception_on_failure=True) lvm_items = [] for line in proc_comm.stdout.splitlines(): if not line: continue lvm_item = LvmItem() for pair in line.strip().split(): if len(pair.split('=')) != 2: continue key, value = pair.split('=') if key == 'LVM2_LV_NAME': lvm_item.lv_name = value if key == 'LVM2_VG_NAME': lvm_item.vg_name = value if key == 'LVM2_LV_KERNEL_MAJOR': lvm_item.lv_kernel_major = value if key == 'LVM2_LV_KERNEL_MINOR': lvm_item.lv_kernel_minor = value lvm_items.append(lvm_item) return lvm_items def should_skip_for_inplace_encryption(self, device_item): """ TYPE="raid0" TYPE="part" TYPE="crypt" first check whether there's one file system on it. if the type is disk, then to check whether it have child-items, say the part, lvm or crypt luks. if the answer is yes, then skip it. 
""" if device_item.file_system is None or device_item.file_system == "": self.logger.log(msg=("there's no file system on this device: {0}, so skip it.").format(device_item)) return True else: if device_item.size < CommonVariables.min_filesystem_size_support: self.logger.log(msg="the device size is too small," + str(device_item.size) + " so skip it.", level=CommonVariables.WarningLevel) return True supported_device_type = ["disk","part","raid0","raid1","raid5","raid10","lvm"] if device_item.type not in supported_device_type: self.logger.log(msg="the device type: " + str(device_item.type) + " is not supported yet, so skip it.", level=CommonVariables.WarningLevel) return True if device_item.uuid is None or device_item.uuid == "": self.logger.log(msg="the device do not have the related uuid, so skip it.", level=CommonVariables.WarningLevel) return True sub_items = self.get_device_items("/dev/" + device_item.name) if len(sub_items) > 1: self.logger.log(msg=("there's sub items for the device:{0} , so skip it.".format(device_item.name)), level=CommonVariables.WarningLevel) return True azure_blk_items = self.get_azure_devices() if device_item.type == "crypt": self.logger.log(msg=("device_item.type is:{0}, so skip it.".format(device_item.type)), level=CommonVariables.WarningLevel) return True if device_item.mount_point == "/": self.logger.log(msg=("the mountpoint is root:{0}, so skip it.".format(device_item)), level=CommonVariables.WarningLevel) return True for azure_blk_item in azure_blk_items: if azure_blk_item.name == device_item.name: self.logger.log(msg="the mountpoint is the azure disk root or resource, so skip it.") return True return False def get_azure_devices(self): ide_devices = self.get_ide_devices() blk_items = [] for ide_device in ide_devices: current_blk_items = self.get_device_items("/dev/" + ide_device) for current_blk_item in current_blk_items: blk_items.append(current_blk_item) return blk_items def get_ide_devices(self): """ this only return the device names of the ide. """ ide_devices = [] for vmbus in os.listdir(self.vmbus_sys_path): f = open('%s/%s/%s' % (self.vmbus_sys_path, vmbus, 'class_id'), 'r') class_id = f.read() f.close() if class_id.strip() == self.ide_class_id: device_sdx_path = self.find_block_sdx_path(vmbus) self.logger.log("found one ide with vmbus: {0} and the sdx path is: {1}".format(vmbus, device_sdx_path)) ide_devices.append(device_sdx_path) return ide_devices def find_block_sdx_path(self, vmbus): device = None for root, dirs, files in os.walk(os.path.join(self.vmbus_sys_path , vmbus)): if root.endswith("/block"): device = dirs[0] else : #older distros for d in dirs: if ':' in d and "block" == d.split(':')[0]: device = d.split(':')[1] break return device
apache-2.0
-1,087,829,880,348,993,400
43.427778
174
0.57214
false
mywulin/functest
functest/opnfv_tests/openstack/tempest/conf_utils.py
1
9929
#!/usr/bin/env python # # Copyright (c) 2015 All rights reserved # This program and the accompanying materials # are made available under the terms of the Apache License, Version 2.0 # which accompanies this distribution, and is available at # # http://www.apache.org/licenses/LICENSE-2.0 # """Tempest configuration utilities.""" import ConfigParser import logging import fileinput import os import shutil import subprocess import pkg_resources import yaml from functest.utils import config from functest.utils import env IMAGE_ID_ALT = None FLAVOR_ID_ALT = None RALLY_CONF_PATH = "/etc/rally/rally.conf" RALLY_AARCH64_PATCH_PATH = pkg_resources.resource_filename( 'functest', 'ci/rally_aarch64_patch.conf') GLANCE_IMAGE_PATH = os.path.join( getattr(config.CONF, 'dir_functest_images'), getattr(config.CONF, 'openstack_image_file_name')) TEMPEST_CUSTOM = pkg_resources.resource_filename( 'functest', 'opnfv_tests/openstack/tempest/custom_tests/test_list.txt') TEMPEST_BLACKLIST = pkg_resources.resource_filename( 'functest', 'opnfv_tests/openstack/tempest/custom_tests/blacklist.txt') TEMPEST_CONF_YAML = pkg_resources.resource_filename( 'functest', 'opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml') CI_INSTALLER_TYPE = env.get('INSTALLER_TYPE') """ logging configuration """ LOGGER = logging.getLogger(__name__) def create_rally_deployment(): """Create new rally deployment""" # set the architecture to default pod_arch = env.get("POD_ARCH") arch_filter = ['aarch64'] if pod_arch and pod_arch in arch_filter: LOGGER.info("Apply aarch64 specific to rally config...") with open(RALLY_AARCH64_PATCH_PATH, "r") as pfile: rally_patch_conf = pfile.read() for line in fileinput.input(RALLY_CONF_PATH, inplace=1): print line, if "cirros|testvm" in line: print rally_patch_conf LOGGER.info("Creating Rally environment...") try: cmd = ['rally', 'deployment', 'destroy', '--deployment', str(getattr(config.CONF, 'rally_deployment_name'))] output = subprocess.check_output(cmd) LOGGER.info("%s\n%s", " ".join(cmd), output) except subprocess.CalledProcessError: pass cmd = ['rally', 'deployment', 'create', '--fromenv', '--name', str(getattr(config.CONF, 'rally_deployment_name'))] output = subprocess.check_output(cmd) LOGGER.info("%s\n%s", " ".join(cmd), output) cmd = ['rally', 'deployment', 'check'] output = subprocess.check_output(cmd) LOGGER.info("%s\n%s", " ".join(cmd), output) def create_verifier(): """Create new verifier""" LOGGER.info("Create verifier from existing repo...") cmd = ['rally', 'verify', 'delete-verifier', '--id', str(getattr(config.CONF, 'tempest_verifier_name')), '--force'] try: output = subprocess.check_output(cmd) LOGGER.info("%s\n%s", " ".join(cmd), output) except subprocess.CalledProcessError: pass cmd = ['rally', 'verify', 'create-verifier', '--source', str(getattr(config.CONF, 'dir_repo_tempest')), '--name', str(getattr(config.CONF, 'tempest_verifier_name')), '--type', 'tempest', '--system-wide'] output = subprocess.check_output(cmd) LOGGER.info("%s\n%s", " ".join(cmd), output) def get_verifier_id(): """ Returns verifier id for current Tempest """ create_rally_deployment() create_verifier() cmd = ("rally verify list-verifiers | awk '/" + getattr(config.CONF, 'tempest_verifier_name') + "/ {print $2}'") proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) deployment_uuid = proc.stdout.readline().rstrip() if deployment_uuid == "": LOGGER.error("Tempest verifier not found.") raise Exception('Error with command:%s' % cmd) return deployment_uuid def 
get_verifier_deployment_id(): """ Returns deployment id for active Rally deployment """ cmd = ("rally deployment list | awk '/" + getattr(config.CONF, 'rally_deployment_name') + "/ {print $2}'") proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) deployment_uuid = proc.stdout.readline().rstrip() if deployment_uuid == "": LOGGER.error("Rally deployment not found.") raise Exception('Error with command:%s' % cmd) return deployment_uuid def get_verifier_repo_dir(verifier_id): """ Returns installed verifier repo directory for Tempest """ if not verifier_id: verifier_id = get_verifier_id() return os.path.join(getattr(config.CONF, 'dir_rally_inst'), 'verification', 'verifier-{}'.format(verifier_id), 'repo') def get_verifier_deployment_dir(verifier_id, deployment_id): """ Returns Rally deployment directory for current verifier """ if not verifier_id: verifier_id = get_verifier_id() if not deployment_id: deployment_id = get_verifier_deployment_id() return os.path.join(getattr(config.CONF, 'dir_rally_inst'), 'verification', 'verifier-{}'.format(verifier_id), 'for-deployment-{}'.format(deployment_id)) def backup_tempest_config(conf_file, res_dir): """ Copy config file to tempest results directory """ if not os.path.exists(res_dir): os.makedirs(res_dir) shutil.copyfile(conf_file, os.path.join(res_dir, 'tempest.conf')) def update_tempest_conf_file(conf_file, rconfig): """Update defined paramters into tempest config file""" with open(TEMPEST_CONF_YAML) as yfile: conf_yaml = yaml.safe_load(yfile) if conf_yaml: sections = rconfig.sections() for section in conf_yaml: if section not in sections: rconfig.add_section(section) sub_conf = conf_yaml.get(section) for key, value in sub_conf.items(): rconfig.set(section, key, value) with open(conf_file, 'wb') as config_file: rconfig.write(config_file) def configure_tempest_update_params(tempest_conf_file, res_dir, network_name=None, image_id=None, flavor_id=None, compute_cnt=1): # pylint: disable=too-many-branches, too-many-arguments """ Add/update needed parameters into tempest.conf file """ LOGGER.debug("Updating selected tempest.conf parameters...") rconfig = ConfigParser.RawConfigParser() rconfig.read(tempest_conf_file) rconfig.set('compute', 'fixed_network_name', network_name) rconfig.set('compute', 'volume_device_name', env.get('VOLUME_DEVICE_NAME')) if image_id is not None: rconfig.set('compute', 'image_ref', image_id) if IMAGE_ID_ALT is not None: rconfig.set('compute', 'image_ref_alt', IMAGE_ID_ALT) if getattr(config.CONF, 'tempest_use_custom_flavors'): if flavor_id is not None: rconfig.set('compute', 'flavor_ref', flavor_id) if FLAVOR_ID_ALT is not None: rconfig.set('compute', 'flavor_ref_alt', FLAVOR_ID_ALT) if compute_cnt > 1: # enable multinode tests rconfig.set('compute', 'min_compute_nodes', compute_cnt) rconfig.set('compute-feature-enabled', 'live_migration', True) rconfig.set('identity', 'region', os.environ.get('OS_REGION_NAME')) identity_api_version = os.environ.get("OS_IDENTITY_API_VERSION", '3') if identity_api_version == '3': auth_version = 'v3' rconfig.set('identity-feature-enabled', 'api_v2', False) else: auth_version = 'v2' rconfig.set('identity', 'auth_version', auth_version) rconfig.set( 'validation', 'ssh_timeout', getattr(config.CONF, 'tempest_validation_ssh_timeout')) rconfig.set('object-storage', 'operator_role', getattr(config.CONF, 'tempest_object_storage_operator_role')) if os.environ.get('OS_ENDPOINT_TYPE') is not None: rconfig.set('identity', 'v3_endpoint_type', 
os.environ.get('OS_ENDPOINT_TYPE')) if os.environ.get('OS_ENDPOINT_TYPE') is not None: sections = rconfig.sections() services_list = [ 'compute', 'volume', 'image', 'network', 'data-processing', 'object-storage', 'orchestration'] for service in services_list: if service not in sections: rconfig.add_section(service) rconfig.set(service, 'endpoint_type', os.environ.get('OS_ENDPOINT_TYPE')) LOGGER.debug('Add/Update required params defined in tempest_conf.yaml ' 'into tempest.conf file') update_tempest_conf_file(tempest_conf_file, rconfig) backup_tempest_config(tempest_conf_file, res_dir) def configure_verifier(deployment_dir): """ Execute rally verify configure-verifier, which generates tempest.conf """ cmd = ['rally', 'verify', 'configure-verifier', '--reconfigure', '--id', str(getattr(config.CONF, 'tempest_verifier_name'))] output = subprocess.check_output(cmd) LOGGER.info("%s\n%s", " ".join(cmd), output) LOGGER.debug("Looking for tempest.conf file...") tempest_conf_file = os.path.join(deployment_dir, "tempest.conf") if not os.path.isfile(tempest_conf_file): LOGGER.error("Tempest configuration file %s NOT found.", tempest_conf_file) raise Exception("Tempest configuration file %s NOT found." % tempest_conf_file) else: return tempest_conf_file
apache-2.0
-6,122,440,020,923,547,000
34.974638
79
0.619901
false
leiferikb/bitpop
build/scripts/slave/unittests/test_env.py
1
4472
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Script to setup the environment to run unit tests. Modifies PYTHONPATH to automatically include parent, common and pylibs directories. """ import os import sys import textwrap RUNTESTS_DIR = os.path.dirname(os.path.abspath(__file__)) DATA_PATH = os.path.join(RUNTESTS_DIR, 'data') BASE_DIR = os.path.abspath(os.path.join(RUNTESTS_DIR, '..', '..', '..')) DEPOT_TOOLS_DIR = os.path.join(BASE_DIR, os.pardir, 'depot_tools') sys.path.insert(0, os.path.join(BASE_DIR, 'scripts')) sys.path.insert(0, os.path.join(BASE_DIR, 'site_config')) sys.path.insert(0, os.path.join(BASE_DIR, 'third_party')) sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'buildbot_slave_8_4')) sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'twisted_10_2')) sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'mock-1.0.1')) def ensure_coverage_importable(): try: from distutils.version import StrictVersion import coverage if (StrictVersion(coverage.__version__) < StrictVersion('3.7') or not coverage.collector.CTracer): del sys.modules['coverage'] del coverage else: return except ImportError: if sys.platform.startswith('win'): # In order to compile the coverage module on Windows we need to set the # 'VS90COMNTOOLS' environment variable. This usually point to the # installation folder of VS2008 but we can fake it to make it point to the # version of the toolchain checked in depot_tools. # # This variable usually point to the $(VsInstallDir)\Common7\Tools but is # only used to access %VS90COMNTOOLS%/../../VC/vcvarsall.bat and therefore # any valid directory respecting this structure can be used. vc_path = os.path.join(DEPOT_TOOLS_DIR, 'win_toolchain', 'vs2013_files', 'VC', 'bin') # If the toolchain isn't available then ask the user to fetch chromium in # order to install it. if not os.path.isdir(vc_path): print textwrap.dedent(""" You probably don't have the Windows toolchain in your depot_tools checkout. Install it by running: fetch chromium """) sys.exit(1) os.environ['VS90COMNTOOLS'] = vc_path try: import setuptools # pylint: disable=W0612 except ImportError: print textwrap.dedent(""" No compatible system-wide python-coverage package installed, and setuptools is not installed either. Please obtain setuptools by: Debian/Ubuntu: sudo apt-get install python-setuptools python-dev OS X: https://pypi.python.org/pypi/setuptools#unix-including-mac-os-x-curl Other: https://pypi.python.org/pypi/setuptools#installation-instructions """) sys.exit(1) from pkg_resources import get_build_platform try: # Python 2.7 or >= 3.2 from sysconfig import get_python_version except ImportError: from distutils.sysconfig import get_python_version cov_dir = os.path.join(BASE_DIR, 'third_party', 'coverage-3.7.1') cov_egg = os.path.join(cov_dir, 'dist', 'coverage-3.7.1-py%s-%s.egg' % ( get_python_version(), get_build_platform())) # The C-compiled coverage engine is WAY faster (and less buggy) than the pure # python version, so we build the dist_egg if necessary. 
if not os.path.exists(cov_egg): import subprocess print 'Building Coverage 3.7.1' p = subprocess.Popen([sys.executable, 'setup.py', 'bdist_egg'], cwd=cov_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: print 'Error while building :(' print stdout print stderr if sys.platform.startswith('linux'): print textwrap.dedent(""" You probably don't have the 'python-dev' package installed. Install it by running: sudo apt-get install python-dev """) else: print textwrap.dedent(""" I'm not sure what's wrong, but your system seems incapable of building python extensions. Please fix that by installing a Python with headers and the approprite command-line build tools for your platform. """) sys.exit(1) sys.path.insert(0, cov_egg) ensure_coverage_importable() from common import find_depot_tools # pylint: disable=W0611
gpl-3.0
-8,570,735,398,111,095,000
36.579832
80
0.67576
false
ornlneutronimaging/ResoFit
ResoFit/data/IPTS_20439/ipts_20439_lead_partial.py
1
3265
import numpy as np import pprint import matplotlib.pyplot as plt from ResoFit.experiment import Experiment import peakutils as pku from ResoFit.simulation import Simulation from scipy import signal import scipy folder = 'data/IPTS_20439/reso_data_20439' sample_name = ['No Pb', '10mm Pb'] data_file = ['Ta.csv', 'Ta_Pb_whole.csv'] norm_to_file = ['OB.csv', 'OB_Pb_whole.csv'] norm_factor = [1.05, 1.22] spectra_file = 'Ta_lead_10mm__0__040_Spectra.txt' baseline = True deg = 6 # x_axis = 'number' logx = False # # # Calibrate the peak positions x_type = 'energy' y_type = 'transmission' source_to_detector_m = 16.45 offset_us = 0 fmt = '-' lw = 1 exps = {} ax0 = None for _index, each_name in enumerate(sample_name): exps[each_name] = Experiment(spectra_file=spectra_file, data_file=data_file[_index], folder=folder) exps[each_name].norm_to(file=norm_to_file[_index], norm_factor=norm_factor[_index]) if ax0 is None: ax0 = exps[each_name].plot(x_type=x_type, y_type=y_type, source_to_detector_m=source_to_detector_m, offset_us=offset_us, logx=logx, baseline=baseline, deg=deg, fmt=fmt, lw=lw, label=each_name) else: ax0 = exps[each_name].plot(ax_mpl=ax0, x_type=x_type, y_type=y_type, source_to_detector_m=source_to_detector_m, offset_us=offset_us, logx=logx, baseline=baseline, deg=deg, fmt=fmt, lw=lw, label=each_name) # simu.plot(ax_mpl=ax0[i], x_type='energy', y_type='attenuation', # source_to_detector_m=source_to_detector_m, offset_us=offset_us, logx=True, # mixed=False, all_layers=False, all_elements=False, items_to_plot=[_ele], # fmt='-.', lw=1, alpha=1) plt.xlim(5, 120) plt.show() # # # experiment1.plot(offset_us=offset_us, source_to_detector_m=source_to_detector_m, # # x_axis=x_axis, baseline=baseline, energy_xmax=energy_xmax, # # lambda_xmax=lambda_xmax) # # data = 1-experiment1.data[0] # # plt.plot(data, 'k-') # _baseline = pku.baseline(data, deg=7) # data_flat = data - _baseline # plt.plot(data_flat, 'b-') # # # indexes = pku.indexes(data_flat, thres=0.1, min_dist=50) # print(indexes) # plt.plot(data_flat[indexes], 'bx', label='peak') # # # peakind = signal.find_peaks_cwt(data_flat, widths=np.arange(1, len(data_flat))) # # print(peakind) # # plt.plot(data_flat[peakind], 'bs', label='peak') # # # After slicing # experiment1.slice(slice_start=300, slice_end=2200, reset_index=True) # data_sliced = 1-experiment1.data[0] # # plt.plot(data_sliced, 'r:') # _baseline_2 = pku.baseline(data_sliced, deg=7) # data_sliced_flat = data_sliced - _baseline_2 # plt.plot(experiment1.img_num, data_sliced_flat, 'y-') # # indexes = pku.indexes(data_sliced_flat, thres=0.1, min_dist=50) # x_indexes = indexes + 300 # print(indexes) # plt.plot(x_indexes, data_sliced_flat[indexes], 'rx', label='peak') # # # peakind = signal.find_peaks_cwt(data_sliced_flat, widths=np.arange(1, len(data_sliced_flat))) # # print(peakind) # # plt.plot(data_sliced_flat[peakind], 'rs', label='peak') # # plt.show() # plt.plot(x,y) # plt.show() # simulation.plot(items_to_plot=['U233'])
bsd-3-clause
5,409,193,133,953,388,000
33.368421
106
0.634916
false
GammaC0de/pyload
src/pyload/plugins/decrypters/FilerNetFolder.py
1
1050
# -*- coding: utf-8 -*- from ..base.simple_decrypter import SimpleDecrypter class FilerNetFolder(SimpleDecrypter): __name__ = "FilerNetFolder" __type__ = "decrypter" __version__ = "0.48" __status__ = "testing" __pyload_version__ = "0.5" __pattern__ = r"https?://filer\.net/folder/\w{16}" __config__ = [ ("enabled", "bool", "Activated", True), ("use_premium", "bool", "Use premium account if available", True), ( "folder_per_package", "Default;Yes;No", "Create folder for each package", "Default", ), ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10), ] __description__ = """Filer.net decrypter plugin""" __license__ = "GPLv3" __authors__ = [ ("nath_schwarz", "[email protected]"), ("stickell", "[email protected]"), ] LINK_PATTERN = r'href="(/get/\w{16})">(?!<)' NAME_PATTERN = r"<h3>(?P<N>.+?) - <small" OFFLINE_PATTERN = r"Nicht gefunden"
agpl-3.0
1,552,273,822,165,763,600
27.378378
85
0.532381
false
kizniche/Mycodo
mycodo/inputs/sht2x.py
1
4086
# coding=utf-8 # From https://github.com/ControlEverythingCommunity/SHT25/blob/master/Python/SHT25.py import time import copy from mycodo.inputs.base_input import AbstractInput from mycodo.inputs.sensorutils import calculate_dewpoint from mycodo.inputs.sensorutils import calculate_vapor_pressure_deficit # Measurements measurements_dict = { 0: { 'measurement': 'temperature', 'unit': 'C' }, 1: { 'measurement': 'humidity', 'unit': 'percent' }, 2: { 'measurement': 'dewpoint', 'unit': 'C' }, 3: { 'measurement': 'vapor_pressure_deficit', 'unit': 'Pa' } } # Input information INPUT_INFORMATION = { 'input_name_unique': 'SHT2x', 'input_manufacturer': 'Sensirion', 'input_name': 'SHT2x', 'input_library': 'smbus2', 'measurements_name': 'Humidity/Temperature', 'measurements_dict': measurements_dict, 'url_manufacturer': 'https://www.sensirion.com/en/environmental-sensors/humidity-sensors/humidity-temperature-sensor-sht2x-digital-i2c-accurate/', 'options_enabled': [ 'measurements_select', 'period', 'pre_output' ], 'options_disabled': [ 'interface', 'i2c_location' ], 'dependencies_module': [ ('pip-pypi', 'smbus2', 'smbus2==0.4.1') ], 'interfaces': ['I2C'], 'i2c_location': ['0x40'], 'i2c_address_editable': False } class InputModule(AbstractInput): """ A sensor support class that measures the SHT2x's humidity and temperature and calculates the dew point """ def __init__(self, input_dev, testing=False): super(InputModule, self).__init__(input_dev, testing=testing, name=__name__) if not testing: self.initialize_input() def initialize_input(self): from smbus2 import SMBus self.i2c_address = int(str(self.input_dev.i2c_location), 16) self.sht2x = SMBus(self.input_dev.i2c_bus) def get_measurement(self): """ Gets the humidity and temperature """ if not self.sht2x: self.logger.error("Input not set up") return self.return_dict = copy.deepcopy(measurements_dict) for _ in range(2): try: # Send temperature measurement command # 0xF3(243) NO HOLD master self.sht2x.write_byte(self.i2c_address, 0xF3) time.sleep(0.5) # Read data back, 2 bytes # Temp MSB, Temp LSB data0 = self.sht2x.read_byte(self.i2c_address) data1 = self.sht2x.read_byte(self.i2c_address) temperature = -46.85 + (((data0 * 256 + data1) * 175.72) / 65536.0) # Send humidity measurement command # 0xF5(245) NO HOLD master self.sht2x.write_byte(self.i2c_address, 0xF5) time.sleep(0.5) # Read data back, 2 bytes # Humidity MSB, Humidity LSB data0 = self.sht2x.read_byte(self.i2c_address) data1 = self.sht2x.read_byte(self.i2c_address) humidity = -6 + (((data0 * 256 + data1) * 125.0) / 65536.0) if self.is_enabled(0): self.value_set(0, temperature) if self.is_enabled(1): self.value_set(1, humidity) if self.is_enabled(2) and self.is_enabled(0) and self.is_enabled(1): self.value_set(2, calculate_dewpoint(self.value_get(0), self.value_get(1))) if self.is_enabled(3) and self.is_enabled(0) and self.is_enabled(1): self.value_set(3, calculate_vapor_pressure_deficit(self.value_get(0), self.value_get(1))) return self.return_dict except Exception as e: self.logger.exception("Exception when taking a reading: {err}".format(err=e)) # Send soft reset and try a second read self.sht2x.write_byte(self.i2c_address, 0xFE) time.sleep(0.1)
gpl-3.0
8,223,683,000,269,916,000
31.688
150
0.572687
false
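The get_measurement method in the record above converts two raw I2C bytes into physical units using the SHT2x conversion formulas. A small standalone sketch of just those conversions, assuming the same MSB/LSB byte order as the module (the example raw values are made up):

def sht2x_temperature_c(msb, lsb):
    """Convert a raw 16-bit SHT2x temperature reading (two I2C bytes) to degrees C."""
    raw = msb * 256 + lsb
    return -46.85 + (raw * 175.72) / 65536.0


def sht2x_humidity_percent(msb, lsb):
    """Convert a raw 16-bit SHT2x humidity reading (two I2C bytes) to %RH."""
    raw = msb * 256 + lsb
    return -6 + (raw * 125.0) / 65536.0


# Example with made-up raw bytes:
print(round(sht2x_temperature_c(0x63, 0x2C), 1))    # ~21.2 C for raw 0x632C
print(round(sht2x_humidity_percent(0x4E, 0x85), 1)) # ~32.3 %RH for raw 0x4E85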
zenn1989/scoria-interlude
L2Jscoria-Game/data/scripts/quests/87_SagaOfEvasSaint/__init__.py
1
2329
# Made by Emperorc

import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from quests.SagasSuperclass import Quest as JQuest

qn = "87_SagaOfEvasSaint"
qnu = 87
qna = "Saga of Eva's Saint"

class Quest (JQuest) :

    def __init__(self,id,name,descr):
        # first initialize the quest. The superclass defines variables, instantiates States, etc
        JQuest.__init__(self,id,name,descr)

        # Next, override necessary variables:
        self.NPC = [30191,31626,31588,31280,31620,31646,31649,31653,31654,31655,31657,31280]
        self.Items = [7080,7524,7081,7502,7285,7316,7347,7378,7409,7440,7088,0]
        self.Mob = [27266,27236,27276]
        self.qn = qn
        self.classid = 105
        self.prevclass = 0x1e
        self.X = [164650,46087,46066]
        self.Y = [-74121,-36372,-36396]
        self.Z = [-2871,-1685,-1685]
        self.Text = ["PLAYERNAME! Pursued to here! However, I jumped out of the Banshouren boundaries! You look at the giant as the sign of power!",
                     "... Oh ... good! So it was ... let's begin!",
                     "I do not have the patience ..! I have been a giant force ...! Cough chatter ah ah ah!",
                     "Paying homage to those who disrupt the orderly will be PLAYERNAME's death!",
                     "Now, my soul freed from the shackles of the millennium, Halixia, to the back side I come ...",
                     "Why do you interfere others' battles?",
                     "This is a waste of time.. Say goodbye...!",
                     "...That is the enemy",
                     "...Goodness! PLAYERNAME you are still looking?",
                     "PLAYERNAME ... Not just to whom the victory. Only personnel involved in the fighting are eligible to share in the victory.",
                     "Your sword is not an ornament. Don't you think, PLAYERNAME?",
                     "Goodness! I no longer sense a battle there now.",
                     "let...",
                     "Only engaged in the battle to bar their choice. Perhaps you should regret.",
                     "The human nation was foolish to try and fight a giant's strength.",
                     "Must...Retreat... Too...Strong.",
                     "PLAYERNAME. Defeat...by...retaining...and...Mo...Hacker",
                     "....! Fight...Defeat...It...Fight...Defeat...It..."]
        # finally, register all events to be triggered appropriately, using the overridden values.
        JQuest.registerNPCs(self)

QUEST = Quest(qnu,qn,qna)
QUEST.setInitialState(QUEST.CREATED)
gpl-3.0
-5,054,227,852,129,113,000
60.315789
231
0.670245
false
tudev/Checkout
app/main/views.py
1
20477
import cgi import email import hashlib import json import os import requests import smtplib import uuid from datetime import datetime from flask import (abort, jsonify, g, session, render_template, redirect, request, url_for) from functools import wraps from manage import app, client from random import randint from . import main @main.before_request def before_request(): session.permanent = True g.user = None if 'user' in session: g.user = session['user'] @main.route('/') def index(): db = client.tudev_checkout inventory = db.inventory.find() inventory_list = [] for item in inventory: formatted_item = ''' <tr> <td>{name}</td> <td class="hide-col" id="{name}-quant">{quantity}</td> <td class="hide-col">{reservation_length}</td> <td class="hide-col">{item_category}</td> <td class="hide-col"><a href="{tutorials_link}">Link</a></td> <td> <form id="add-to-cart"> <input type="number" name="cart_quanity" min="1" max="{quantity}" placeholder="1" id="cart_quantity" required> <input id="name" value="{name}" style="display: none" disabled> <button id="add_to_cart_b" type="submit" style="background: none; border: none;"> <a> Add to Cart <i class="fa fa-shopping-cart" aria-hidden="true"></i> </a> </button> </form> </td> </tr> '''.format(item_id=item['item_id'], name=item['name'], quantity=item['quantity'], reservation_length=item['reservation_length'], item_category=item['category'], tutorials_link=item['tutorials_link']) inventory_list.append(formatted_item) formatted_inventory = '\n'.join(inventory_list) hackathons = db.hackathons.find() hackathon_list = [] for hackathon in hackathons: maps_link = 'https://www.google.com/maps/search/' + \ hackathon['location'] formatted_hackathon = ''' <tr> <td><a href="{link}" target="_blank">{name}</a></td> <td class="hide-col-h"> <a href="{location_link}" target="_blank">{location}</a> </td> <td>{date}</td> </tr> '''.format(name=hackathon['name'], location=hackathon['location'], location_link=maps_link, date=hackathon['date_range'], link=hackathon['link']) hackathon_list.append(formatted_hackathon) formatted_hackathons = '\n'.join(hackathon_list) client_id = None welcome_msg = None user = None if g.user is None or 'user' not in session: client_id = app.config['CLIENT_ID'] else: db = client.tudev_checkout found_user = db.users.find_one({'email': session['user']}) user = session['user'] if found_user: if found_user['email'] == '[email protected]': user = 'Cuff Boy' else: user = found_user['name'].split(' ')[0] random_msg_index = randint(0,len(app.config['WELCOME_MSG'])-1) welcome_msg = app.config['WELCOME_MSG'][random_msg_index] admin = False if 'user' in session: admin = session['user'] in app.config['ADMIN_EMAILS'] return render_template('index.html', inventory=formatted_inventory, hackathons=formatted_hackathons, user=user, client_id=client_id, welcome_msg=welcome_msg, admin=admin, host_url=request.host_url) def admin_required(f): ''' Allows the passed function to only be executed when the user is logged in :return: decorated function ''' @wraps(f) def decorated_function(*args, **kwargs): if 'user' in session: if session['user'] in app.config['ADMIN_EMAILS']: return f(*args, **kwargs) return redirect(url_for('.index')) return f(*args, **kwargs) return decorated_function def login_required(f): ''' Allows the passed function to only be executed when the user is logged in :return: decorated function ''' @wraps(f) def decorated_function(*args, **kwargs): if 'user' in session: db = client.tudev_checkout found_user = db.users.find_one({'email': session['user']}) if 
found_user: return f(*args, **kwargs) abort(405) return f(*args, **kwargs) return decorated_function @main.route('/submit_request', methods=['POST']) @login_required def submit_request(): db = client.tudev_checkout data = dict(request.form) resp = {} resp['success'] = [] resp['failed'] = [] for item in data: item_name = data[item][0] item_quantity = int(data[item][1]) stored_item = db.inventory.find_one({'name': item_name}) if(stored_item): if(stored_item['quantity'] >= item_quantity): new_quant = stored_item['quantity'] - item_quantity db.inventory.update({'name': item_name}, {'$set': {'quantity': new_quant}}) resp['success'].append({'name': item_name, 'quantity': item_quantity}) else: resp['failed'].append({'name': item_name, 'quantity': item_quantity}) request_id = str(uuid.uuid4())[:4] resp['id'] = request_id # if any items were checked out if(resp['success']): # send emails email_account = session['user'] full_account = db.users.find_one({'email': email_account}) if(full_account): user_name = full_account['name'] else: user_name = None email_server = smtplib.SMTP(app.config['SMTP'], 25) email_server.starttls() order_msg = email.message.Message() order_msg['Subject'] = 'TUDev Hardware - Item Request' order_msg['From'] = app.config['REQUEST_EMAIL_SEND'] order_msg['To'] = 'TUDev Orders' order_msg.add_header('Content-Type', 'text/html') items = [] for item in resp['success']: formatted_item = ''' <li>{quantity}x {name}<hr /></li> '''.format(quantity=item['quantity'], name=item['name']) items.append(formatted_item) items = ''.join(items) safe_name = cgi.escape(user_name).encode('ascii', 'xmlcharrefreplace') safe_email = cgi.escape(email_account).encode('ascii', 'xmlcharrefreplace') email_content = ''' <html> <body> <h1>Item Order</h1> <p><strong>From: </strong>{name}</p> <p><strong>Email: </strong>{email_account}</p> <p><strong>Order ID: </strong>{order_id}</p> <p><strong>Items ordered: </strong> <ul> {items} </ul> </p> </body> </html>'''.format(name=safe_name, email_account=safe_email, items=items, order_id=request_id) order_msg.set_payload(email_content) for account in app.config['REQUEST_EMAIL_ADMINS']: email_server.sendmail(app.config['REQUEST_EMAIL_SEND'], account, order_msg.as_string()) return jsonify(resp) @main.route('/admin') @admin_required def admin(): db = client.tudev_checkout inventory = db.inventory.find() inventory_list = [] for item in inventory: formatted_item = ''' <tr> <td>{item_id}</td> <td>{name}</td> <td class="hide-col">{quantity}</td> <td class="hide-col">{reservation_length}</td> <td class="hide-col">{item_category}</td> <td class="hide-col"><a href="{tutorials_link}">Link</a></td> </tr>'''.format(item_id=item['item_id'], name=item['name'], quantity=item['quantity'], reservation_length=item['reservation_length'], item_category=item['category'], tutorials_link=item['tutorials_link']) inventory_list.append(formatted_item) formatted_inventory = '\n'.join(inventory_list) hackathons = db.hackathons.find() hackathon_list = [] for hackathon in hackathons: maps_link = 'https://www.google.com/maps/search/' + \ hackathon['location'] formatted_hackathon = ''' <tr> <td><a href="{link}" target="_blank">{name}</a></td> <td class="hide-col-h"> <a href="{location_link}" target="_blank">{location}</a> </td> <td>{date}</td> </tr> '''.format(name=hackathon['name'], location=hackathon['location'], date=hackathon['date_range'], link=hackathon['link'], location_link=maps_link) hackathon_list.append(formatted_hackathon) formatted_hackathons = '\n'.join(hackathon_list) db = 
client.tudev_checkout found_user = db.users.find_one({'email': session['user']}) user = session['user'] if found_user: if found_user['email'] == '[email protected]': user = 'Cuff Boy' else: user = found_user['name'].split(' ')[0] random_msg_index = randint(0,len(app.config['WELCOME_MSG'])-1) welcome_msg = app.config['WELCOME_MSG'][random_msg_index] return render_template('admin.html', inventory=formatted_inventory, hackathons=formatted_hackathons, user=user, welcome_msg=welcome_msg) @main.route('/authorize') def authorize(): code = request.values['code'] oauth_url = ('https://slack.com/api/oauth.access?client_id=%s' '&client_secret=%s&code=%s' % (app.config['CLIENT_ID'], app.config['CLIENT_SECRET'], code)) oauth_verify = requests.get(oauth_url) response = json.loads(oauth_verify.text) print(response) if response['ok']: # set session for user session['user'] = response['user']['email'] # add user to database to track how many people have signed in db = client.tudev_checkout db.users.update({'email': response['user']['email']}, { 'email': response['user']['email'], 'name': response['user']['name'], 'recent-signin': datetime.now() }, upsert=True) if response['user']['email'] in app.config['ADMIN_EMAILS']: return redirect(url_for('.admin')) else: redirect(url_for('.index')) else: return jsonify({'status': 'not logged in'}) @main.route('/request_item', methods=['POST']) @login_required def request_item(): data = request.form try: name = data['name'] email_account = data['email'] item = data['item'] content = data['content'] confirm_msg = email.message.Message() confirm_msg['Subject'] = 'Request Item - Request Recieved' confirm_msg['From'] = app.config['REQUEST_EMAIL_SEND'] confirm_msg['To'] = email_account confirm_msg.add_header('Content-Type', 'text/html') safe_name = cgi.escape(name.split(' ')[0]).encode('ascii', 'xmlcharrefreplace') safe_item = cgi.escape(item).encode('ascii', 'xmlcharrefreplace') email_content = ''' <html> <body> <p> Hey {name}! <br> We recieved your request, we'll look into "{item}". 
<br> Happy Hacking, <br> The TUDev Team </p> </body> </html>'''.format(name=safe_name, item=safe_item) confirm_msg.set_payload(email_content) email_server = smtplib.SMTP(app.config['SMTP'], 25) email_server.starttls() # send email email_server.sendmail(app.config['REQUEST_EMAIL_SEND'], email_account, confirm_msg.as_string()) request_msg = email.message.Message() request_msg['Subject'] = 'TUDev Hardware - Item Request' request_msg['From'] = app.config['REQUEST_EMAIL_SEND'] request_msg['To'] = email_account request_msg.add_header('Content-Type', 'text/html') safe_email = cgi.escape(email_account).encode('ascii', 'xmlcharrefreplace') safe_content = cgi.escape(content).encode('ascii', 'xmlcharrefreplace') email_content = ''' <html> <body> <h1>Item Request</h1> <p><strong>From: </strong>{name}</p> <p><strong>Email: </strong>{email_account}</p> <p><strong>Item Requested: </strong>{item}</p> <p><strong>Reason for request</strong><br>{content}</p> </body> </html>'''.format(name=safe_name, email_account=safe_email, item=safe_item, content=safe_content) request_msg.set_payload(email_content) for account in app.config['REQUEST_EMAIL_ADMINS']: email_server.sendmail(app.config['REQUEST_EMAIL_SEND'], account, request_msg.as_string()) return jsonify({'status': 'request sent'}) except KeyError as e: abort(400) @main.route('/inventory') def inventory(): return jsonify({'status': 'wip'}) @main.route('/add_hackathon', methods=['POST']) @admin_required def add_hackathon(): data = request.form try: name = data['name'] location = data['location'] date_range = data['date'] link = data['link'] db = client.tudev_checkout safe_name = cgi.escape(name).encode('ascii', 'xmlcharrefreplace') safe_location = cgi.escape(location).encode('ascii', 'xmlcharrefreplace') safe_date = cgi.escape(date_range).encode('ascii', 'xmlcharrefreplace') db.hackathons.update({'name': safe_name}, { 'name': safe_name, 'location': safe_location, 'date_range': safe_date, 'link': link }, upsert=True) return jsonify({'Status': 'Hackathon added/updated.'}) except KeyError: abort(400) @main.route('/remove_hackathon', methods=['POST']) @admin_required def remove_hackathon(): data = request.form try: hackathon_name = data['name'] db = client.tudev_checkout db.hackathons.remove({'name': hackathon_name}) return jsonify({'status': 'hackathon removed'}) except KeyError: abort(400) @main.route('/add_item', methods=['POST']) @admin_required def add_tem(): data = request.form try: name = data['name'] quantity = int(data['quantity']) res_length = data['res_length'] category = data['category'] tutorial_link = data['item_link'] item_id = data['item_id'] if item_id: db = client.tudev_checkout safe_name = cgi.escape(name).encode('ascii', 'xmlcharrefreplace') safe_res = cgi.escape(res_length).encode('ascii', 'xmlcharrefreplace') safe_category = cgi.escape(category).encode('ascii', 'xmlcharrefreplace') safe_id = cgi.escape(item_id).encode('ascii', 'xmlcharrefreplace') db.inventory.update({'item_id': item_id}, { 'name': safe_name, 'quantity': quantity, 'reservation_length': safe_res, 'category': safe_category, 'tutorials_link': tutorial_link, 'item_id': safe_id }, upsert=True) return jsonify({'updated': item_id}) else: item_id = hashlib.sha1(bytes(os.urandom(32))) item_id = item_id.hexdigest()[:4] db = client.tudev_checkout safe_name = cgi.escape(name).encode('ascii', 'xmlcharrefreplace') safe_res = cgi.escape(res_length).encode('ascii', 'xmlcharrefreplace') safe_category = cgi.escape(category).encode('ascii', 'xmlcharrefreplace') db.inventory.insert({ 'name': 
safe_name, 'quantity': quantity, 'reservation_length': safe_res, 'category': safe_category, 'tutorials_link': tutorial_link, 'item_id': item_id }) return jsonify({'inserted': item_id}) except KeyError: abort(400) return jsonify({'status': 'done'}) @main.route('/increase_quantity', methods=['POST']) @admin_required def increase_quantity(): data = request.form try: item_id = data['item_id'] add_ons = int(data['quantity']) db = client.tudev_checkout c_item = db.inventory.find_one({'item_id': item_id}) if c_item: db.inventory.update({'item_id': item_id}, { 'name': c_item['name'], 'quantity': c_item['quantity'] + add_ons, 'reservation_length': c_item['reservation_length'], 'category': c_item['category'], 'tutorials_link': c_item['tutorials_link'], 'item_id': item_id }) return jsonify({'updated': item_id}) else: abort(404) except KeyError: abort(400) @main.route('/remove_item', methods=['POST']) @admin_required def remove_item(): data = request.form try: item_id = data['item_id'] if data['quantity']: removals = int(data['quantity']) else: removals = 0 if removals: db = client.tudev_checkout c_item = db.inventory.find_one({'item_id': item_id}) if c_item: if c_item['quantity'] > removals: db.inventory.update({'item_id': item_id}, { 'name': c_item['name'], 'quantity': c_item['quantity'] - \ removals, 'reservation_length': c_item['reservation_length'], 'category': c_item['category'], 'tutorials_link': c_item['tutorials_link'], 'item_id': item_id }) return jsonify({'updated': item_id}) else: db.inventory.remove({'item_id': item_id}) return jsonify({'removed': item_id}) else: db = client.tudev_checkout db.inventory.remove({'item_id': item_id}) return jsonify({'removed': item_id}) except KeyError: abort(400) @main.route('/logout') def logout(): g.user = None session.pop('user', None) return redirect(url_for('.index'))
apache-2.0
6,293,730,994,258,273,000
35.566071
91
0.500708
false
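The views module in the record above gates routes with session-checking decorators (admin_required, login_required). A minimal, self-contained sketch of that decorator pattern is below; the admin address and secret key are placeholders rather than values from the repository, and the database lookup the real login_required performs is omitted.

from functools import wraps
from flask import Flask, session, redirect, url_for

app = Flask(__name__)
app.secret_key = "change-me"           # placeholder, not from the repo
ADMIN_EMAILS = {"[email protected]"}   # stand-in for app.config['ADMIN_EMAILS']


def admin_required(f):
    """Run the view only when the session user is a configured admin; otherwise bounce home."""
    @wraps(f)
    def decorated(*args, **kwargs):
        if session.get("user") in ADMIN_EMAILS:
            return f(*args, **kwargs)
        return redirect(url_for("index"))
    return decorated


@app.route("/")
def index():
    return "public page"


@app.route("/admin")
@admin_required
def admin():
    return "admin page"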
googleapis/googleapis-gen
google/cloud/securitycenter/v1/securitycenter-v1-py/tests/unit/gapic/securitycenter_v1/test_security_center.py
1
291665
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import mock import packaging.version import grpc from grpc.experimental import aio import math import pytest from proto.marshal.rules.dates import DurationRule, TimestampRule from google.api_core import client_options from google.api_core import exceptions as core_exceptions from google.api_core import future from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.securitycenter_v1.services.security_center import SecurityCenterAsyncClient from google.cloud.securitycenter_v1.services.security_center import SecurityCenterClient from google.cloud.securitycenter_v1.services.security_center import pagers from google.cloud.securitycenter_v1.services.security_center import transports from google.cloud.securitycenter_v1.services.security_center.transports.base import _GOOGLE_AUTH_VERSION from google.cloud.securitycenter_v1.types import finding from google.cloud.securitycenter_v1.types import finding as gcs_finding from google.cloud.securitycenter_v1.types import notification_config from google.cloud.securitycenter_v1.types import notification_config as gcs_notification_config from google.cloud.securitycenter_v1.types import organization_settings from google.cloud.securitycenter_v1.types import organization_settings as gcs_organization_settings from google.cloud.securitycenter_v1.types import run_asset_discovery_response from google.cloud.securitycenter_v1.types import security_marks from google.cloud.securitycenter_v1.types import security_marks as gcs_security_marks from google.cloud.securitycenter_v1.types import securitycenter_service from google.cloud.securitycenter_v1.types import source from google.cloud.securitycenter_v1.types import source as gcs_source from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import options_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 from google.oauth2 import service_account from google.protobuf import duration_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import struct_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from google.type import expr_pb2 # type: ignore import google.auth # TODO(busunkim): Once google-auth >= 1.25.0 is required transitively # through google-api-core: # - Delete the auth "less than" test cases # - Delete these pytest markers (Make the "greater than or equal to" tests the default). 
requires_google_auth_lt_1_25_0 = pytest.mark.skipif( packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), reason="This test requires google-auth < 1.25.0", ) requires_google_auth_gte_1_25_0 = pytest.mark.skipif( packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), reason="This test requires google-auth >= 1.25.0", ) def client_cert_source_callback(): return b"cert bytes", b"key bytes" # If default endpoint is localhost, then default mtls endpoint will be the same. # This method modifies the default endpoint so the client can produce a different # mtls endpoint for endpoint testing purposes. def modify_default_endpoint(client): return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" sandbox_endpoint = "example.sandbox.googleapis.com" sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" non_googleapi = "api.example.com" assert SecurityCenterClient._get_default_mtls_endpoint(None) is None assert SecurityCenterClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint assert SecurityCenterClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint assert SecurityCenterClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint assert SecurityCenterClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint assert SecurityCenterClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi @pytest.mark.parametrize("client_class", [ SecurityCenterClient, SecurityCenterAsyncClient, ]) def test_security_center_client_from_service_account_info(client_class): creds = ga_credentials.AnonymousCredentials() with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: factory.return_value = creds info = {"valid": True} client = client_class.from_service_account_info(info) assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == 'securitycenter.googleapis.com:443' @pytest.mark.parametrize("client_class", [ SecurityCenterClient, SecurityCenterAsyncClient, ]) def test_security_center_client_service_account_always_use_jwt(client_class): with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: creds = service_account.Credentials(None, None, None) client = client_class(credentials=creds) use_jwt.assert_not_called() @pytest.mark.parametrize("transport_class,transport_name", [ (transports.SecurityCenterGrpcTransport, "grpc"), (transports.SecurityCenterGrpcAsyncIOTransport, "grpc_asyncio"), ]) def test_security_center_client_service_account_always_use_jwt_true(transport_class, transport_name): with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=True) use_jwt.assert_called_once_with(True) @pytest.mark.parametrize("client_class", [ SecurityCenterClient, SecurityCenterAsyncClient, ]) def test_security_center_client_from_service_account_file(client_class): creds = ga_credentials.AnonymousCredentials() with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") 
assert client.transport._credentials == creds assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == 'securitycenter.googleapis.com:443' def test_security_center_client_get_transport_class(): transport = SecurityCenterClient.get_transport_class() available_transports = [ transports.SecurityCenterGrpcTransport, ] assert transport in available_transports transport = SecurityCenterClient.get_transport_class("grpc") assert transport == transports.SecurityCenterGrpcTransport @pytest.mark.parametrize("client_class,transport_class,transport_name", [ (SecurityCenterClient, transports.SecurityCenterGrpcTransport, "grpc"), (SecurityCenterAsyncClient, transports.SecurityCenterGrpcAsyncIOTransport, "grpc_asyncio"), ]) @mock.patch.object(SecurityCenterClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SecurityCenterClient)) @mock.patch.object(SecurityCenterAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SecurityCenterAsyncClient)) def test_security_center_client_client_options(client_class, transport_class, transport_name): # Check that if channel is provided we won't create a new one. with mock.patch.object(SecurityCenterClient, 'get_transport_class') as gtc: transport = transport_class( credentials=ga_credentials.AnonymousCredentials() ) client = client_class(transport=transport) gtc.assert_not_called() # Check that if channel is provided via str we will create a new one. with mock.patch.object(SecurityCenterClient, 'get_transport_class') as gtc: client = client_class(transport=transport_name) gtc.assert_called() # Check the case api_endpoint is provided. options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") with mock.patch.object(transport_class, '__init__') as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): with mock.patch.object(transport_class, '__init__') as patched: patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "always". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): with mock.patch.object(transport_class, '__init__') as patched: patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): with pytest.raises(MutualTLSChannelError): client = client_class() # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): with pytest.raises(ValueError): client = client_class() # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") with mock.patch.object(transport_class, '__init__') as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, ) @pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ (SecurityCenterClient, transports.SecurityCenterGrpcTransport, "grpc", "true"), (SecurityCenterAsyncClient, transports.SecurityCenterGrpcAsyncIOTransport, "grpc_asyncio", "true"), (SecurityCenterClient, transports.SecurityCenterGrpcTransport, "grpc", "false"), (SecurityCenterAsyncClient, transports.SecurityCenterGrpcAsyncIOTransport, "grpc_asyncio", "false"), ]) @mock.patch.object(SecurityCenterClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SecurityCenterClient)) @mock.patch.object(SecurityCenterAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SecurityCenterAsyncClient)) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_security_center_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. # Check the case client_cert_source is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) with mock.patch.object(transport_class, '__init__') as patched: patched.return_value = None client = client_class(client_options=options) if use_client_cert_env == "false": expected_client_cert_source = None expected_host = client.DEFAULT_ENDPOINT else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): with mock.patch.object(transport_class, '__init__') as patched: with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): if use_client_cert_env == "false": expected_host = client.DEFAULT_ENDPOINT expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT expected_client_cert_source = client_cert_source_callback patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) # Check the case client_cert_source and ADC client cert are not provided. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): with mock.patch.object(transport_class, '__init__') as patched: with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @pytest.mark.parametrize("client_class,transport_class,transport_name", [ (SecurityCenterClient, transports.SecurityCenterGrpcTransport, "grpc"), (SecurityCenterAsyncClient, transports.SecurityCenterGrpcAsyncIOTransport, "grpc_asyncio"), ]) def test_security_center_client_client_options_scopes(client_class, transport_class, transport_name): # Check the case scopes are provided. options = client_options.ClientOptions( scopes=["1", "2"], ) with mock.patch.object(transport_class, '__init__') as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @pytest.mark.parametrize("client_class,transport_class,transport_name", [ (SecurityCenterClient, transports.SecurityCenterGrpcTransport, "grpc"), (SecurityCenterAsyncClient, transports.SecurityCenterGrpcAsyncIOTransport, "grpc_asyncio"), ]) def test_security_center_client_client_options_credentials_file(client_class, transport_class, transport_name): # Check the case credentials file is provided. 
options = client_options.ClientOptions( credentials_file="credentials.json" ) with mock.patch.object(transport_class, '__init__') as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) def test_security_center_client_client_options_from_dict(): with mock.patch('google.cloud.securitycenter_v1.services.security_center.transports.SecurityCenterGrpcTransport.__init__') as grpc_transport: grpc_transport.return_value = None client = SecurityCenterClient( client_options={'api_endpoint': 'squid.clam.whelk'} ) grpc_transport.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) def test_create_source(transport: str = 'grpc', request_type=securitycenter_service.CreateSourceRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_source), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_source.Source( name='name_value', display_name='display_name_value', description='description_value', canonical_name='canonical_name_value', ) response = client.create_source(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.CreateSourceRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcs_source.Source) assert response.name == 'name_value' assert response.display_name == 'display_name_value' assert response.description == 'description_value' assert response.canonical_name == 'canonical_name_value' def test_create_source_from_dict(): test_create_source(request_type=dict) def test_create_source_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_source), '__call__') as call: client.create_source() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.CreateSourceRequest() @pytest.mark.asyncio async def test_create_source_async(transport: str = 'grpc_asyncio', request_type=securitycenter_service.CreateSourceRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_source), '__call__') as call: # Designate an appropriate return value for the call. 
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gcs_source.Source( name='name_value', display_name='display_name_value', description='description_value', canonical_name='canonical_name_value', )) response = await client.create_source(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.CreateSourceRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcs_source.Source) assert response.name == 'name_value' assert response.display_name == 'display_name_value' assert response.description == 'description_value' assert response.canonical_name == 'canonical_name_value' @pytest.mark.asyncio async def test_create_source_async_from_dict(): await test_create_source_async(request_type=dict) def test_create_source_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.CreateSourceRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_source), '__call__') as call: call.return_value = gcs_source.Source() client.create_source(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_create_source_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.CreateSourceRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_source), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_source.Source()) await client.create_source(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] def test_create_source_flattened(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_source), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_source.Source() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.create_source( parent='parent_value', source=gcs_source.Source(name='name_value'), ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].parent == 'parent_value' assert args[0].source == gcs_source.Source(name='name_value') def test_create_source_flattened_error(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.create_source( securitycenter_service.CreateSourceRequest(), parent='parent_value', source=gcs_source.Source(name='name_value'), ) @pytest.mark.asyncio async def test_create_source_flattened_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_source), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_source.Source() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_source.Source()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.create_source( parent='parent_value', source=gcs_source.Source(name='name_value'), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].parent == 'parent_value' assert args[0].source == gcs_source.Source(name='name_value') @pytest.mark.asyncio async def test_create_source_flattened_error_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.create_source( securitycenter_service.CreateSourceRequest(), parent='parent_value', source=gcs_source.Source(name='name_value'), ) def test_create_finding(transport: str = 'grpc', request_type=securitycenter_service.CreateFindingRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_finding), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_finding.Finding( name='name_value', parent='parent_value', resource_name='resource_name_value', state=gcs_finding.Finding.State.ACTIVE, category='category_value', external_uri='external_uri_value', severity=gcs_finding.Finding.Severity.CRITICAL, canonical_name='canonical_name_value', ) response = client.create_finding(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.CreateFindingRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, gcs_finding.Finding) assert response.name == 'name_value' assert response.parent == 'parent_value' assert response.resource_name == 'resource_name_value' assert response.state == gcs_finding.Finding.State.ACTIVE assert response.category == 'category_value' assert response.external_uri == 'external_uri_value' assert response.severity == gcs_finding.Finding.Severity.CRITICAL assert response.canonical_name == 'canonical_name_value' def test_create_finding_from_dict(): test_create_finding(request_type=dict) def test_create_finding_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_finding), '__call__') as call: client.create_finding() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.CreateFindingRequest() @pytest.mark.asyncio async def test_create_finding_async(transport: str = 'grpc_asyncio', request_type=securitycenter_service.CreateFindingRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_finding), '__call__') as call: # Designate an appropriate return value for the call. call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gcs_finding.Finding( name='name_value', parent='parent_value', resource_name='resource_name_value', state=gcs_finding.Finding.State.ACTIVE, category='category_value', external_uri='external_uri_value', severity=gcs_finding.Finding.Severity.CRITICAL, canonical_name='canonical_name_value', )) response = await client.create_finding(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.CreateFindingRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcs_finding.Finding) assert response.name == 'name_value' assert response.parent == 'parent_value' assert response.resource_name == 'resource_name_value' assert response.state == gcs_finding.Finding.State.ACTIVE assert response.category == 'category_value' assert response.external_uri == 'external_uri_value' assert response.severity == gcs_finding.Finding.Severity.CRITICAL assert response.canonical_name == 'canonical_name_value' @pytest.mark.asyncio async def test_create_finding_async_from_dict(): await test_create_finding_async(request_type=dict) def test_create_finding_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.CreateFindingRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( type(client.transport.create_finding), '__call__') as call: call.return_value = gcs_finding.Finding() client.create_finding(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_create_finding_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.CreateFindingRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_finding), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_finding.Finding()) await client.create_finding(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] def test_create_finding_flattened(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_finding), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_finding.Finding() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.create_finding( parent='parent_value', finding_id='finding_id_value', finding=gcs_finding.Finding(name='name_value'), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].parent == 'parent_value' assert args[0].finding_id == 'finding_id_value' assert args[0].finding == gcs_finding.Finding(name='name_value') def test_create_finding_flattened_error(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.create_finding( securitycenter_service.CreateFindingRequest(), parent='parent_value', finding_id='finding_id_value', finding=gcs_finding.Finding(name='name_value'), ) @pytest.mark.asyncio async def test_create_finding_flattened_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_finding), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_finding.Finding() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_finding.Finding()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
response = await client.create_finding( parent='parent_value', finding_id='finding_id_value', finding=gcs_finding.Finding(name='name_value'), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].parent == 'parent_value' assert args[0].finding_id == 'finding_id_value' assert args[0].finding == gcs_finding.Finding(name='name_value') @pytest.mark.asyncio async def test_create_finding_flattened_error_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.create_finding( securitycenter_service.CreateFindingRequest(), parent='parent_value', finding_id='finding_id_value', finding=gcs_finding.Finding(name='name_value'), ) def test_create_notification_config(transport: str = 'grpc', request_type=securitycenter_service.CreateNotificationConfigRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_notification_config), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_notification_config.NotificationConfig( name='name_value', description='description_value', pubsub_topic='pubsub_topic_value', service_account='service_account_value', streaming_config=gcs_notification_config.NotificationConfig.StreamingConfig(filter='filter_value'), ) response = client.create_notification_config(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.CreateNotificationConfigRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcs_notification_config.NotificationConfig) assert response.name == 'name_value' assert response.description == 'description_value' assert response.pubsub_topic == 'pubsub_topic_value' assert response.service_account == 'service_account_value' def test_create_notification_config_from_dict(): test_create_notification_config(request_type=dict) def test_create_notification_config_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_notification_config), '__call__') as call: client.create_notification_config() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.CreateNotificationConfigRequest() @pytest.mark.asyncio async def test_create_notification_config_async(transport: str = 'grpc_asyncio', request_type=securitycenter_service.CreateNotificationConfigRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. 
request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_notification_config), '__call__') as call: # Designate an appropriate return value for the call. call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gcs_notification_config.NotificationConfig( name='name_value', description='description_value', pubsub_topic='pubsub_topic_value', service_account='service_account_value', )) response = await client.create_notification_config(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.CreateNotificationConfigRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcs_notification_config.NotificationConfig) assert response.name == 'name_value' assert response.description == 'description_value' assert response.pubsub_topic == 'pubsub_topic_value' assert response.service_account == 'service_account_value' @pytest.mark.asyncio async def test_create_notification_config_async_from_dict(): await test_create_notification_config_async(request_type=dict) def test_create_notification_config_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.CreateNotificationConfigRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_notification_config), '__call__') as call: call.return_value = gcs_notification_config.NotificationConfig() client.create_notification_config(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_create_notification_config_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.CreateNotificationConfigRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_notification_config), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_notification_config.NotificationConfig()) await client.create_notification_config(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] def test_create_notification_config_flattened(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_notification_config), '__call__') as call: # Designate an appropriate return value for the call. 
call.return_value = gcs_notification_config.NotificationConfig() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.create_notification_config( parent='parent_value', config_id='config_id_value', notification_config=gcs_notification_config.NotificationConfig(name='name_value'), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].parent == 'parent_value' assert args[0].config_id == 'config_id_value' assert args[0].notification_config == gcs_notification_config.NotificationConfig(name='name_value') def test_create_notification_config_flattened_error(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.create_notification_config( securitycenter_service.CreateNotificationConfigRequest(), parent='parent_value', config_id='config_id_value', notification_config=gcs_notification_config.NotificationConfig(name='name_value'), ) @pytest.mark.asyncio async def test_create_notification_config_flattened_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_notification_config), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_notification_config.NotificationConfig() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_notification_config.NotificationConfig()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.create_notification_config( parent='parent_value', config_id='config_id_value', notification_config=gcs_notification_config.NotificationConfig(name='name_value'), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].parent == 'parent_value' assert args[0].config_id == 'config_id_value' assert args[0].notification_config == gcs_notification_config.NotificationConfig(name='name_value') @pytest.mark.asyncio async def test_create_notification_config_flattened_error_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.create_notification_config( securitycenter_service.CreateNotificationConfigRequest(), parent='parent_value', config_id='config_id_value', notification_config=gcs_notification_config.NotificationConfig(name='name_value'), ) def test_delete_notification_config(transport: str = 'grpc', request_type=securitycenter_service.DeleteNotificationConfigRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.delete_notification_config), '__call__') as call: # Designate an appropriate return value for the call. 
call.return_value = None response = client.delete_notification_config(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.DeleteNotificationConfigRequest() # Establish that the response is the type that we expect. assert response is None def test_delete_notification_config_from_dict(): test_delete_notification_config(request_type=dict) def test_delete_notification_config_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.delete_notification_config), '__call__') as call: client.delete_notification_config() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.DeleteNotificationConfigRequest() @pytest.mark.asyncio async def test_delete_notification_config_async(transport: str = 'grpc_asyncio', request_type=securitycenter_service.DeleteNotificationConfigRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.delete_notification_config), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) response = await client.delete_notification_config(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.DeleteNotificationConfigRequest() # Establish that the response is the type that we expect. assert response is None @pytest.mark.asyncio async def test_delete_notification_config_async_from_dict(): await test_delete_notification_config_async(request_type=dict) def test_delete_notification_config_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.DeleteNotificationConfigRequest() request.name = 'name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.delete_notification_config), '__call__') as call: call.return_value = None client.delete_notification_config(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'name=name/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_delete_notification_config_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. 
request = securitycenter_service.DeleteNotificationConfigRequest() request.name = 'name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.delete_notification_config), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) await client.delete_notification_config(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'name=name/value', ) in kw['metadata'] def test_delete_notification_config_flattened(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.delete_notification_config), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = None # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.delete_notification_config( name='name_value', ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].name == 'name_value' def test_delete_notification_config_flattened_error(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.delete_notification_config( securitycenter_service.DeleteNotificationConfigRequest(), name='name_value', ) @pytest.mark.asyncio async def test_delete_notification_config_flattened_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.delete_notification_config), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = None call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.delete_notification_config( name='name_value', ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].name == 'name_value' @pytest.mark.asyncio async def test_delete_notification_config_flattened_error_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.delete_notification_config( securitycenter_service.DeleteNotificationConfigRequest(), name='name_value', ) def test_get_iam_policy(transport: str = 'grpc', request_type=iam_policy_pb2.GetIamPolicyRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( type(client.transport.get_iam_policy), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = policy_pb2.Policy( version=774, etag=b'etag_blob', ) response = client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == iam_policy_pb2.GetIamPolicyRequest() # Establish that the response is the type that we expect. assert isinstance(response, policy_pb2.Policy) assert response.version == 774 assert response.etag == b'etag_blob' def test_get_iam_policy_from_dict(): test_get_iam_policy(request_type=dict) def test_get_iam_policy_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_iam_policy), '__call__') as call: client.get_iam_policy() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == iam_policy_pb2.GetIamPolicyRequest() @pytest.mark.asyncio async def test_get_iam_policy_async(transport: str = 'grpc_asyncio', request_type=iam_policy_pb2.GetIamPolicyRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_iam_policy), '__call__') as call: # Designate an appropriate return value for the call. call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy( version=774, etag=b'etag_blob', )) response = await client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == iam_policy_pb2.GetIamPolicyRequest() # Establish that the response is the type that we expect. assert isinstance(response, policy_pb2.Policy) assert response.version == 774 assert response.etag == b'etag_blob' @pytest.mark.asyncio async def test_get_iam_policy_async_from_dict(): await test_get_iam_policy_async(request_type=dict) def test_get_iam_policy_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = iam_policy_pb2.GetIamPolicyRequest() request.resource = 'resource/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_iam_policy), '__call__') as call: call.return_value = policy_pb2.Policy() client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'resource=resource/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_get_iam_policy_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = iam_policy_pb2.GetIamPolicyRequest() request.resource = 'resource/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_iam_policy), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) await client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'resource=resource/value', ) in kw['metadata'] def test_get_iam_policy_from_dict_foreign(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_iam_policy), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = policy_pb2.Policy() response = client.get_iam_policy(request={ 'resource': 'resource_value', 'options': options_pb2.GetPolicyOptions(requested_policy_version=2598), } ) call.assert_called() def test_get_iam_policy_flattened(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_iam_policy), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = policy_pb2.Policy() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_iam_policy( resource='resource_value', ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].resource == 'resource_value' def test_get_iam_policy_flattened_error(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.get_iam_policy( iam_policy_pb2.GetIamPolicyRequest(), resource='resource_value', ) @pytest.mark.asyncio async def test_get_iam_policy_flattened_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_iam_policy), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = policy_pb2.Policy() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.get_iam_policy( resource='resource_value', ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].resource == 'resource_value' @pytest.mark.asyncio async def test_get_iam_policy_flattened_error_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.get_iam_policy( iam_policy_pb2.GetIamPolicyRequest(), resource='resource_value', ) def test_get_notification_config(transport: str = 'grpc', request_type=securitycenter_service.GetNotificationConfigRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_notification_config), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = notification_config.NotificationConfig( name='name_value', description='description_value', pubsub_topic='pubsub_topic_value', service_account='service_account_value', streaming_config=notification_config.NotificationConfig.StreamingConfig(filter='filter_value'), ) response = client.get_notification_config(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.GetNotificationConfigRequest() # Establish that the response is the type that we expect. assert isinstance(response, notification_config.NotificationConfig) assert response.name == 'name_value' assert response.description == 'description_value' assert response.pubsub_topic == 'pubsub_topic_value' assert response.service_account == 'service_account_value' def test_get_notification_config_from_dict(): test_get_notification_config(request_type=dict) def test_get_notification_config_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_notification_config), '__call__') as call: client.get_notification_config() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.GetNotificationConfigRequest() @pytest.mark.asyncio async def test_get_notification_config_async(transport: str = 'grpc_asyncio', request_type=securitycenter_service.GetNotificationConfigRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_notification_config), '__call__') as call: # Designate an appropriate return value for the call. 
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(notification_config.NotificationConfig( name='name_value', description='description_value', pubsub_topic='pubsub_topic_value', service_account='service_account_value', )) response = await client.get_notification_config(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.GetNotificationConfigRequest() # Establish that the response is the type that we expect. assert isinstance(response, notification_config.NotificationConfig) assert response.name == 'name_value' assert response.description == 'description_value' assert response.pubsub_topic == 'pubsub_topic_value' assert response.service_account == 'service_account_value' @pytest.mark.asyncio async def test_get_notification_config_async_from_dict(): await test_get_notification_config_async(request_type=dict) def test_get_notification_config_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.GetNotificationConfigRequest() request.name = 'name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_notification_config), '__call__') as call: call.return_value = notification_config.NotificationConfig() client.get_notification_config(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'name=name/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_get_notification_config_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.GetNotificationConfigRequest() request.name = 'name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_notification_config), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(notification_config.NotificationConfig()) await client.get_notification_config(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'name=name/value', ) in kw['metadata'] def test_get_notification_config_flattened(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_notification_config), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = notification_config.NotificationConfig() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_notification_config( name='name_value', ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].name == 'name_value' def test_get_notification_config_flattened_error(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.get_notification_config( securitycenter_service.GetNotificationConfigRequest(), name='name_value', ) @pytest.mark.asyncio async def test_get_notification_config_flattened_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_notification_config), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = notification_config.NotificationConfig() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(notification_config.NotificationConfig()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.get_notification_config( name='name_value', ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].name == 'name_value' @pytest.mark.asyncio async def test_get_notification_config_flattened_error_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.get_notification_config( securitycenter_service.GetNotificationConfigRequest(), name='name_value', ) def test_get_organization_settings(transport: str = 'grpc', request_type=securitycenter_service.GetOrganizationSettingsRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_organization_settings), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = organization_settings.OrganizationSettings( name='name_value', enable_asset_discovery=True, ) response = client.get_organization_settings(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.GetOrganizationSettingsRequest() # Establish that the response is the type that we expect. assert isinstance(response, organization_settings.OrganizationSettings) assert response.name == 'name_value' assert response.enable_asset_discovery is True def test_get_organization_settings_from_dict(): test_get_organization_settings(request_type=dict) def test_get_organization_settings_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( type(client.transport.get_organization_settings), '__call__') as call: client.get_organization_settings() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.GetOrganizationSettingsRequest() @pytest.mark.asyncio async def test_get_organization_settings_async(transport: str = 'grpc_asyncio', request_type=securitycenter_service.GetOrganizationSettingsRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_organization_settings), '__call__') as call: # Designate an appropriate return value for the call. call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(organization_settings.OrganizationSettings( name='name_value', enable_asset_discovery=True, )) response = await client.get_organization_settings(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.GetOrganizationSettingsRequest() # Establish that the response is the type that we expect. assert isinstance(response, organization_settings.OrganizationSettings) assert response.name == 'name_value' assert response.enable_asset_discovery is True @pytest.mark.asyncio async def test_get_organization_settings_async_from_dict(): await test_get_organization_settings_async(request_type=dict) def test_get_organization_settings_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.GetOrganizationSettingsRequest() request.name = 'name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_organization_settings), '__call__') as call: call.return_value = organization_settings.OrganizationSettings() client.get_organization_settings(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'name=name/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_get_organization_settings_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.GetOrganizationSettingsRequest() request.name = 'name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_organization_settings), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(organization_settings.OrganizationSettings()) await client.get_organization_settings(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'name=name/value', ) in kw['metadata'] def test_get_organization_settings_flattened(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_organization_settings), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = organization_settings.OrganizationSettings() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_organization_settings( name='name_value', ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].name == 'name_value' def test_get_organization_settings_flattened_error(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.get_organization_settings( securitycenter_service.GetOrganizationSettingsRequest(), name='name_value', ) @pytest.mark.asyncio async def test_get_organization_settings_flattened_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_organization_settings), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = organization_settings.OrganizationSettings() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(organization_settings.OrganizationSettings()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.get_organization_settings( name='name_value', ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].name == 'name_value' @pytest.mark.asyncio async def test_get_organization_settings_flattened_error_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.get_organization_settings( securitycenter_service.GetOrganizationSettingsRequest(), name='name_value', ) def test_get_source(transport: str = 'grpc', request_type=securitycenter_service.GetSourceRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_source), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = source.Source( name='name_value', display_name='display_name_value', description='description_value', canonical_name='canonical_name_value', ) response = client.get_source(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.GetSourceRequest() # Establish that the response is the type that we expect. assert isinstance(response, source.Source) assert response.name == 'name_value' assert response.display_name == 'display_name_value' assert response.description == 'description_value' assert response.canonical_name == 'canonical_name_value' def test_get_source_from_dict(): test_get_source(request_type=dict) def test_get_source_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_source), '__call__') as call: client.get_source() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.GetSourceRequest() @pytest.mark.asyncio async def test_get_source_async(transport: str = 'grpc_asyncio', request_type=securitycenter_service.GetSourceRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_source), '__call__') as call: # Designate an appropriate return value for the call. call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(source.Source( name='name_value', display_name='display_name_value', description='description_value', canonical_name='canonical_name_value', )) response = await client.get_source(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.GetSourceRequest() # Establish that the response is the type that we expect. assert isinstance(response, source.Source) assert response.name == 'name_value' assert response.display_name == 'display_name_value' assert response.description == 'description_value' assert response.canonical_name == 'canonical_name_value' @pytest.mark.asyncio async def test_get_source_async_from_dict(): await test_get_source_async(request_type=dict) def test_get_source_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.GetSourceRequest() request.name = 'name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_source), '__call__') as call: call.return_value = source.Source() client.get_source(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'name=name/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_get_source_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.GetSourceRequest() request.name = 'name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_source), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(source.Source()) await client.get_source(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'name=name/value', ) in kw['metadata'] def test_get_source_flattened(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_source), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = source.Source() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_source( name='name_value', ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].name == 'name_value' def test_get_source_flattened_error(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.get_source( securitycenter_service.GetSourceRequest(), name='name_value', ) @pytest.mark.asyncio async def test_get_source_flattened_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_source), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = source.Source() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(source.Source()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.get_source( name='name_value', ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].name == 'name_value' @pytest.mark.asyncio async def test_get_source_flattened_error_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): await client.get_source( securitycenter_service.GetSourceRequest(), name='name_value', ) def test_group_assets(transport: str = 'grpc', request_type=securitycenter_service.GroupAssetsRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.group_assets), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = securitycenter_service.GroupAssetsResponse( next_page_token='next_page_token_value', total_size=1086, ) response = client.group_assets(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.GroupAssetsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.GroupAssetsPager) assert response.next_page_token == 'next_page_token_value' assert response.total_size == 1086 def test_group_assets_from_dict(): test_group_assets(request_type=dict) def test_group_assets_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.group_assets), '__call__') as call: client.group_assets() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.GroupAssetsRequest() @pytest.mark.asyncio async def test_group_assets_async(transport: str = 'grpc_asyncio', request_type=securitycenter_service.GroupAssetsRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.group_assets), '__call__') as call: # Designate an appropriate return value for the call. call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(securitycenter_service.GroupAssetsResponse( next_page_token='next_page_token_value', total_size=1086, )) response = await client.group_assets(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.GroupAssetsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.GroupAssetsAsyncPager) assert response.next_page_token == 'next_page_token_value' assert response.total_size == 1086 @pytest.mark.asyncio async def test_group_assets_async_from_dict(): await test_group_assets_async(request_type=dict) def test_group_assets_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. 
request = securitycenter_service.GroupAssetsRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.group_assets), '__call__') as call: call.return_value = securitycenter_service.GroupAssetsResponse() client.group_assets(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_group_assets_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.GroupAssetsRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.group_assets), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(securitycenter_service.GroupAssetsResponse()) await client.group_assets(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] def test_group_assets_pager(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.group_assets), '__call__') as call: # Set the response to a series of pages. call.side_effect = ( securitycenter_service.GroupAssetsResponse( group_by_results=[ securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), ], next_page_token='abc', ), securitycenter_service.GroupAssetsResponse( group_by_results=[], next_page_token='def', ), securitycenter_service.GroupAssetsResponse( group_by_results=[ securitycenter_service.GroupResult(), ], next_page_token='ghi', ), securitycenter_service.GroupAssetsResponse( group_by_results=[ securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), ], ), RuntimeError, ) metadata = () metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ('parent', ''), )), ) pager = client.group_assets(request={}) assert pager._metadata == metadata results = [i for i in pager] assert len(results) == 6 assert all(isinstance(i, securitycenter_service.GroupResult) for i in results) def test_group_assets_pages(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.group_assets), '__call__') as call: # Set the response to a series of pages. 
call.side_effect = ( securitycenter_service.GroupAssetsResponse( group_by_results=[ securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), ], next_page_token='abc', ), securitycenter_service.GroupAssetsResponse( group_by_results=[], next_page_token='def', ), securitycenter_service.GroupAssetsResponse( group_by_results=[ securitycenter_service.GroupResult(), ], next_page_token='ghi', ), securitycenter_service.GroupAssetsResponse( group_by_results=[ securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), ], ), RuntimeError, ) pages = list(client.group_assets(request={}).pages) for page_, token in zip(pages, ['abc','def','ghi', '']): assert page_.raw_page.next_page_token == token @pytest.mark.asyncio async def test_group_assets_async_pager(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.group_assets), '__call__', new_callable=mock.AsyncMock) as call: # Set the response to a series of pages. call.side_effect = ( securitycenter_service.GroupAssetsResponse( group_by_results=[ securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), ], next_page_token='abc', ), securitycenter_service.GroupAssetsResponse( group_by_results=[], next_page_token='def', ), securitycenter_service.GroupAssetsResponse( group_by_results=[ securitycenter_service.GroupResult(), ], next_page_token='ghi', ), securitycenter_service.GroupAssetsResponse( group_by_results=[ securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), ], ), RuntimeError, ) async_pager = await client.group_assets(request={},) assert async_pager.next_page_token == 'abc' responses = [] async for response in async_pager: responses.append(response) assert len(responses) == 6 assert all(isinstance(i, securitycenter_service.GroupResult) for i in responses) @pytest.mark.asyncio async def test_group_assets_async_pages(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.group_assets), '__call__', new_callable=mock.AsyncMock) as call: # Set the response to a series of pages. 
call.side_effect = ( securitycenter_service.GroupAssetsResponse( group_by_results=[ securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), ], next_page_token='abc', ), securitycenter_service.GroupAssetsResponse( group_by_results=[], next_page_token='def', ), securitycenter_service.GroupAssetsResponse( group_by_results=[ securitycenter_service.GroupResult(), ], next_page_token='ghi', ), securitycenter_service.GroupAssetsResponse( group_by_results=[ securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), ], ), RuntimeError, ) pages = [] async for page_ in (await client.group_assets(request={})).pages: pages.append(page_) for page_, token in zip(pages, ['abc','def','ghi', '']): assert page_.raw_page.next_page_token == token def test_group_findings(transport: str = 'grpc', request_type=securitycenter_service.GroupFindingsRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.group_findings), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = securitycenter_service.GroupFindingsResponse( next_page_token='next_page_token_value', total_size=1086, ) response = client.group_findings(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.GroupFindingsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.GroupFindingsPager) assert response.next_page_token == 'next_page_token_value' assert response.total_size == 1086 def test_group_findings_from_dict(): test_group_findings(request_type=dict) def test_group_findings_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.group_findings), '__call__') as call: client.group_findings() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.GroupFindingsRequest() @pytest.mark.asyncio async def test_group_findings_async(transport: str = 'grpc_asyncio', request_type=securitycenter_service.GroupFindingsRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.group_findings), '__call__') as call: # Designate an appropriate return value for the call. call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(securitycenter_service.GroupFindingsResponse( next_page_token='next_page_token_value', total_size=1086, )) response = await client.group_findings(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.GroupFindingsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.GroupFindingsAsyncPager) assert response.next_page_token == 'next_page_token_value' assert response.total_size == 1086 @pytest.mark.asyncio async def test_group_findings_async_from_dict(): await test_group_findings_async(request_type=dict) def test_group_findings_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.GroupFindingsRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.group_findings), '__call__') as call: call.return_value = securitycenter_service.GroupFindingsResponse() client.group_findings(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_group_findings_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.GroupFindingsRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.group_findings), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(securitycenter_service.GroupFindingsResponse()) await client.group_findings(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] def test_group_findings_flattened(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.group_findings), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = securitycenter_service.GroupFindingsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.group_findings( parent='parent_value', group_by='group_by_value', ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].parent == 'parent_value' assert args[0].group_by == 'group_by_value' def test_group_findings_flattened_error(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): client.group_findings( securitycenter_service.GroupFindingsRequest(), parent='parent_value', group_by='group_by_value', ) @pytest.mark.asyncio async def test_group_findings_flattened_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.group_findings), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = securitycenter_service.GroupFindingsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(securitycenter_service.GroupFindingsResponse()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.group_findings( parent='parent_value', group_by='group_by_value', ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].parent == 'parent_value' assert args[0].group_by == 'group_by_value' @pytest.mark.asyncio async def test_group_findings_flattened_error_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.group_findings( securitycenter_service.GroupFindingsRequest(), parent='parent_value', group_by='group_by_value', ) def test_group_findings_pager(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.group_findings), '__call__') as call: # Set the response to a series of pages. call.side_effect = ( securitycenter_service.GroupFindingsResponse( group_by_results=[ securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), ], next_page_token='abc', ), securitycenter_service.GroupFindingsResponse( group_by_results=[], next_page_token='def', ), securitycenter_service.GroupFindingsResponse( group_by_results=[ securitycenter_service.GroupResult(), ], next_page_token='ghi', ), securitycenter_service.GroupFindingsResponse( group_by_results=[ securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), ], ), RuntimeError, ) metadata = () metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ('parent', ''), )), ) pager = client.group_findings(request={}) assert pager._metadata == metadata results = [i for i in pager] assert len(results) == 6 assert all(isinstance(i, securitycenter_service.GroupResult) for i in results) def test_group_findings_pages(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.group_findings), '__call__') as call: # Set the response to a series of pages. 
call.side_effect = ( securitycenter_service.GroupFindingsResponse( group_by_results=[ securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), ], next_page_token='abc', ), securitycenter_service.GroupFindingsResponse( group_by_results=[], next_page_token='def', ), securitycenter_service.GroupFindingsResponse( group_by_results=[ securitycenter_service.GroupResult(), ], next_page_token='ghi', ), securitycenter_service.GroupFindingsResponse( group_by_results=[ securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), ], ), RuntimeError, ) pages = list(client.group_findings(request={}).pages) for page_, token in zip(pages, ['abc','def','ghi', '']): assert page_.raw_page.next_page_token == token @pytest.mark.asyncio async def test_group_findings_async_pager(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.group_findings), '__call__', new_callable=mock.AsyncMock) as call: # Set the response to a series of pages. call.side_effect = ( securitycenter_service.GroupFindingsResponse( group_by_results=[ securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), ], next_page_token='abc', ), securitycenter_service.GroupFindingsResponse( group_by_results=[], next_page_token='def', ), securitycenter_service.GroupFindingsResponse( group_by_results=[ securitycenter_service.GroupResult(), ], next_page_token='ghi', ), securitycenter_service.GroupFindingsResponse( group_by_results=[ securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), ], ), RuntimeError, ) async_pager = await client.group_findings(request={},) assert async_pager.next_page_token == 'abc' responses = [] async for response in async_pager: responses.append(response) assert len(responses) == 6 assert all(isinstance(i, securitycenter_service.GroupResult) for i in responses) @pytest.mark.asyncio async def test_group_findings_async_pages(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.group_findings), '__call__', new_callable=mock.AsyncMock) as call: # Set the response to a series of pages. 
call.side_effect = ( securitycenter_service.GroupFindingsResponse( group_by_results=[ securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), ], next_page_token='abc', ), securitycenter_service.GroupFindingsResponse( group_by_results=[], next_page_token='def', ), securitycenter_service.GroupFindingsResponse( group_by_results=[ securitycenter_service.GroupResult(), ], next_page_token='ghi', ), securitycenter_service.GroupFindingsResponse( group_by_results=[ securitycenter_service.GroupResult(), securitycenter_service.GroupResult(), ], ), RuntimeError, ) pages = [] async for page_ in (await client.group_findings(request={})).pages: pages.append(page_) for page_, token in zip(pages, ['abc','def','ghi', '']): assert page_.raw_page.next_page_token == token def test_list_assets(transport: str = 'grpc', request_type=securitycenter_service.ListAssetsRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_assets), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = securitycenter_service.ListAssetsResponse( next_page_token='next_page_token_value', total_size=1086, ) response = client.list_assets(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.ListAssetsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListAssetsPager) assert response.next_page_token == 'next_page_token_value' assert response.total_size == 1086 def test_list_assets_from_dict(): test_list_assets(request_type=dict) def test_list_assets_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_assets), '__call__') as call: client.list_assets() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.ListAssetsRequest() @pytest.mark.asyncio async def test_list_assets_async(transport: str = 'grpc_asyncio', request_type=securitycenter_service.ListAssetsRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_assets), '__call__') as call: # Designate an appropriate return value for the call. call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(securitycenter_service.ListAssetsResponse( next_page_token='next_page_token_value', total_size=1086, )) response = await client.list_assets(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.ListAssetsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListAssetsAsyncPager) assert response.next_page_token == 'next_page_token_value' assert response.total_size == 1086 @pytest.mark.asyncio async def test_list_assets_async_from_dict(): await test_list_assets_async(request_type=dict) def test_list_assets_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.ListAssetsRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_assets), '__call__') as call: call.return_value = securitycenter_service.ListAssetsResponse() client.list_assets(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_list_assets_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.ListAssetsRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_assets), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(securitycenter_service.ListAssetsResponse()) await client.list_assets(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] def test_list_assets_pager(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_assets), '__call__') as call: # Set the response to a series of pages. 
call.side_effect = ( securitycenter_service.ListAssetsResponse( list_assets_results=[ securitycenter_service.ListAssetsResponse.ListAssetsResult(), securitycenter_service.ListAssetsResponse.ListAssetsResult(), securitycenter_service.ListAssetsResponse.ListAssetsResult(), ], next_page_token='abc', ), securitycenter_service.ListAssetsResponse( list_assets_results=[], next_page_token='def', ), securitycenter_service.ListAssetsResponse( list_assets_results=[ securitycenter_service.ListAssetsResponse.ListAssetsResult(), ], next_page_token='ghi', ), securitycenter_service.ListAssetsResponse( list_assets_results=[ securitycenter_service.ListAssetsResponse.ListAssetsResult(), securitycenter_service.ListAssetsResponse.ListAssetsResult(), ], ), RuntimeError, ) metadata = () metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ('parent', ''), )), ) pager = client.list_assets(request={}) assert pager._metadata == metadata results = [i for i in pager] assert len(results) == 6 assert all(isinstance(i, securitycenter_service.ListAssetsResponse.ListAssetsResult) for i in results) def test_list_assets_pages(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_assets), '__call__') as call: # Set the response to a series of pages. call.side_effect = ( securitycenter_service.ListAssetsResponse( list_assets_results=[ securitycenter_service.ListAssetsResponse.ListAssetsResult(), securitycenter_service.ListAssetsResponse.ListAssetsResult(), securitycenter_service.ListAssetsResponse.ListAssetsResult(), ], next_page_token='abc', ), securitycenter_service.ListAssetsResponse( list_assets_results=[], next_page_token='def', ), securitycenter_service.ListAssetsResponse( list_assets_results=[ securitycenter_service.ListAssetsResponse.ListAssetsResult(), ], next_page_token='ghi', ), securitycenter_service.ListAssetsResponse( list_assets_results=[ securitycenter_service.ListAssetsResponse.ListAssetsResult(), securitycenter_service.ListAssetsResponse.ListAssetsResult(), ], ), RuntimeError, ) pages = list(client.list_assets(request={}).pages) for page_, token in zip(pages, ['abc','def','ghi', '']): assert page_.raw_page.next_page_token == token @pytest.mark.asyncio async def test_list_assets_async_pager(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_assets), '__call__', new_callable=mock.AsyncMock) as call: # Set the response to a series of pages. 
call.side_effect = ( securitycenter_service.ListAssetsResponse( list_assets_results=[ securitycenter_service.ListAssetsResponse.ListAssetsResult(), securitycenter_service.ListAssetsResponse.ListAssetsResult(), securitycenter_service.ListAssetsResponse.ListAssetsResult(), ], next_page_token='abc', ), securitycenter_service.ListAssetsResponse( list_assets_results=[], next_page_token='def', ), securitycenter_service.ListAssetsResponse( list_assets_results=[ securitycenter_service.ListAssetsResponse.ListAssetsResult(), ], next_page_token='ghi', ), securitycenter_service.ListAssetsResponse( list_assets_results=[ securitycenter_service.ListAssetsResponse.ListAssetsResult(), securitycenter_service.ListAssetsResponse.ListAssetsResult(), ], ), RuntimeError, ) async_pager = await client.list_assets(request={},) assert async_pager.next_page_token == 'abc' responses = [] async for response in async_pager: responses.append(response) assert len(responses) == 6 assert all(isinstance(i, securitycenter_service.ListAssetsResponse.ListAssetsResult) for i in responses) @pytest.mark.asyncio async def test_list_assets_async_pages(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_assets), '__call__', new_callable=mock.AsyncMock) as call: # Set the response to a series of pages. call.side_effect = ( securitycenter_service.ListAssetsResponse( list_assets_results=[ securitycenter_service.ListAssetsResponse.ListAssetsResult(), securitycenter_service.ListAssetsResponse.ListAssetsResult(), securitycenter_service.ListAssetsResponse.ListAssetsResult(), ], next_page_token='abc', ), securitycenter_service.ListAssetsResponse( list_assets_results=[], next_page_token='def', ), securitycenter_service.ListAssetsResponse( list_assets_results=[ securitycenter_service.ListAssetsResponse.ListAssetsResult(), ], next_page_token='ghi', ), securitycenter_service.ListAssetsResponse( list_assets_results=[ securitycenter_service.ListAssetsResponse.ListAssetsResult(), securitycenter_service.ListAssetsResponse.ListAssetsResult(), ], ), RuntimeError, ) pages = [] async for page_ in (await client.list_assets(request={})).pages: pages.append(page_) for page_, token in zip(pages, ['abc','def','ghi', '']): assert page_.raw_page.next_page_token == token def test_list_findings(transport: str = 'grpc', request_type=securitycenter_service.ListFindingsRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_findings), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = securitycenter_service.ListFindingsResponse( next_page_token='next_page_token_value', total_size=1086, ) response = client.list_findings(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.ListFindingsRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, pagers.ListFindingsPager) assert response.next_page_token == 'next_page_token_value' assert response.total_size == 1086 def test_list_findings_from_dict(): test_list_findings(request_type=dict) def test_list_findings_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_findings), '__call__') as call: client.list_findings() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.ListFindingsRequest() @pytest.mark.asyncio async def test_list_findings_async(transport: str = 'grpc_asyncio', request_type=securitycenter_service.ListFindingsRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_findings), '__call__') as call: # Designate an appropriate return value for the call. call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(securitycenter_service.ListFindingsResponse( next_page_token='next_page_token_value', total_size=1086, )) response = await client.list_findings(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.ListFindingsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListFindingsAsyncPager) assert response.next_page_token == 'next_page_token_value' assert response.total_size == 1086 @pytest.mark.asyncio async def test_list_findings_async_from_dict(): await test_list_findings_async(request_type=dict) def test_list_findings_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.ListFindingsRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_findings), '__call__') as call: call.return_value = securitycenter_service.ListFindingsResponse() client.list_findings(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_list_findings_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.ListFindingsRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( type(client.transport.list_findings), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(securitycenter_service.ListFindingsResponse()) await client.list_findings(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] def test_list_findings_pager(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_findings), '__call__') as call: # Set the response to a series of pages. call.side_effect = ( securitycenter_service.ListFindingsResponse( list_findings_results=[ securitycenter_service.ListFindingsResponse.ListFindingsResult(), securitycenter_service.ListFindingsResponse.ListFindingsResult(), securitycenter_service.ListFindingsResponse.ListFindingsResult(), ], next_page_token='abc', ), securitycenter_service.ListFindingsResponse( list_findings_results=[], next_page_token='def', ), securitycenter_service.ListFindingsResponse( list_findings_results=[ securitycenter_service.ListFindingsResponse.ListFindingsResult(), ], next_page_token='ghi', ), securitycenter_service.ListFindingsResponse( list_findings_results=[ securitycenter_service.ListFindingsResponse.ListFindingsResult(), securitycenter_service.ListFindingsResponse.ListFindingsResult(), ], ), RuntimeError, ) metadata = () metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ('parent', ''), )), ) pager = client.list_findings(request={}) assert pager._metadata == metadata results = [i for i in pager] assert len(results) == 6 assert all(isinstance(i, securitycenter_service.ListFindingsResponse.ListFindingsResult) for i in results) def test_list_findings_pages(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_findings), '__call__') as call: # Set the response to a series of pages. call.side_effect = ( securitycenter_service.ListFindingsResponse( list_findings_results=[ securitycenter_service.ListFindingsResponse.ListFindingsResult(), securitycenter_service.ListFindingsResponse.ListFindingsResult(), securitycenter_service.ListFindingsResponse.ListFindingsResult(), ], next_page_token='abc', ), securitycenter_service.ListFindingsResponse( list_findings_results=[], next_page_token='def', ), securitycenter_service.ListFindingsResponse( list_findings_results=[ securitycenter_service.ListFindingsResponse.ListFindingsResult(), ], next_page_token='ghi', ), securitycenter_service.ListFindingsResponse( list_findings_results=[ securitycenter_service.ListFindingsResponse.ListFindingsResult(), securitycenter_service.ListFindingsResponse.ListFindingsResult(), ], ), RuntimeError, ) pages = list(client.list_findings(request={}).pages) for page_, token in zip(pages, ['abc','def','ghi', '']): assert page_.raw_page.next_page_token == token @pytest.mark.asyncio async def test_list_findings_async_pager(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( type(client.transport.list_findings), '__call__', new_callable=mock.AsyncMock) as call: # Set the response to a series of pages. call.side_effect = ( securitycenter_service.ListFindingsResponse( list_findings_results=[ securitycenter_service.ListFindingsResponse.ListFindingsResult(), securitycenter_service.ListFindingsResponse.ListFindingsResult(), securitycenter_service.ListFindingsResponse.ListFindingsResult(), ], next_page_token='abc', ), securitycenter_service.ListFindingsResponse( list_findings_results=[], next_page_token='def', ), securitycenter_service.ListFindingsResponse( list_findings_results=[ securitycenter_service.ListFindingsResponse.ListFindingsResult(), ], next_page_token='ghi', ), securitycenter_service.ListFindingsResponse( list_findings_results=[ securitycenter_service.ListFindingsResponse.ListFindingsResult(), securitycenter_service.ListFindingsResponse.ListFindingsResult(), ], ), RuntimeError, ) async_pager = await client.list_findings(request={},) assert async_pager.next_page_token == 'abc' responses = [] async for response in async_pager: responses.append(response) assert len(responses) == 6 assert all(isinstance(i, securitycenter_service.ListFindingsResponse.ListFindingsResult) for i in responses) @pytest.mark.asyncio async def test_list_findings_async_pages(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_findings), '__call__', new_callable=mock.AsyncMock) as call: # Set the response to a series of pages. call.side_effect = ( securitycenter_service.ListFindingsResponse( list_findings_results=[ securitycenter_service.ListFindingsResponse.ListFindingsResult(), securitycenter_service.ListFindingsResponse.ListFindingsResult(), securitycenter_service.ListFindingsResponse.ListFindingsResult(), ], next_page_token='abc', ), securitycenter_service.ListFindingsResponse( list_findings_results=[], next_page_token='def', ), securitycenter_service.ListFindingsResponse( list_findings_results=[ securitycenter_service.ListFindingsResponse.ListFindingsResult(), ], next_page_token='ghi', ), securitycenter_service.ListFindingsResponse( list_findings_results=[ securitycenter_service.ListFindingsResponse.ListFindingsResult(), securitycenter_service.ListFindingsResponse.ListFindingsResult(), ], ), RuntimeError, ) pages = [] async for page_ in (await client.list_findings(request={})).pages: pages.append(page_) for page_, token in zip(pages, ['abc','def','ghi', '']): assert page_.raw_page.next_page_token == token def test_list_notification_configs(transport: str = 'grpc', request_type=securitycenter_service.ListNotificationConfigsRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_notification_configs), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = securitycenter_service.ListNotificationConfigsResponse( next_page_token='next_page_token_value', ) response = client.list_notification_configs(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.ListNotificationConfigsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListNotificationConfigsPager) assert response.next_page_token == 'next_page_token_value' def test_list_notification_configs_from_dict(): test_list_notification_configs(request_type=dict) def test_list_notification_configs_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_notification_configs), '__call__') as call: client.list_notification_configs() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.ListNotificationConfigsRequest() @pytest.mark.asyncio async def test_list_notification_configs_async(transport: str = 'grpc_asyncio', request_type=securitycenter_service.ListNotificationConfigsRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_notification_configs), '__call__') as call: # Designate an appropriate return value for the call. call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(securitycenter_service.ListNotificationConfigsResponse( next_page_token='next_page_token_value', )) response = await client.list_notification_configs(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.ListNotificationConfigsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListNotificationConfigsAsyncPager) assert response.next_page_token == 'next_page_token_value' @pytest.mark.asyncio async def test_list_notification_configs_async_from_dict(): await test_list_notification_configs_async(request_type=dict) def test_list_notification_configs_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.ListNotificationConfigsRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_notification_configs), '__call__') as call: call.return_value = securitycenter_service.ListNotificationConfigsResponse() client.list_notification_configs(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_list_notification_configs_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.ListNotificationConfigsRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_notification_configs), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(securitycenter_service.ListNotificationConfigsResponse()) await client.list_notification_configs(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] def test_list_notification_configs_flattened(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_notification_configs), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = securitycenter_service.ListNotificationConfigsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.list_notification_configs( parent='parent_value', ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].parent == 'parent_value' def test_list_notification_configs_flattened_error(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.list_notification_configs( securitycenter_service.ListNotificationConfigsRequest(), parent='parent_value', ) @pytest.mark.asyncio async def test_list_notification_configs_flattened_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_notification_configs), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = securitycenter_service.ListNotificationConfigsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(securitycenter_service.ListNotificationConfigsResponse()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.list_notification_configs( parent='parent_value', ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].parent == 'parent_value' @pytest.mark.asyncio async def test_list_notification_configs_flattened_error_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): await client.list_notification_configs( securitycenter_service.ListNotificationConfigsRequest(), parent='parent_value', ) def test_list_notification_configs_pager(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_notification_configs), '__call__') as call: # Set the response to a series of pages. call.side_effect = ( securitycenter_service.ListNotificationConfigsResponse( notification_configs=[ notification_config.NotificationConfig(), notification_config.NotificationConfig(), notification_config.NotificationConfig(), ], next_page_token='abc', ), securitycenter_service.ListNotificationConfigsResponse( notification_configs=[], next_page_token='def', ), securitycenter_service.ListNotificationConfigsResponse( notification_configs=[ notification_config.NotificationConfig(), ], next_page_token='ghi', ), securitycenter_service.ListNotificationConfigsResponse( notification_configs=[ notification_config.NotificationConfig(), notification_config.NotificationConfig(), ], ), RuntimeError, ) metadata = () metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ('parent', ''), )), ) pager = client.list_notification_configs(request={}) assert pager._metadata == metadata results = [i for i in pager] assert len(results) == 6 assert all(isinstance(i, notification_config.NotificationConfig) for i in results) def test_list_notification_configs_pages(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_notification_configs), '__call__') as call: # Set the response to a series of pages. call.side_effect = ( securitycenter_service.ListNotificationConfigsResponse( notification_configs=[ notification_config.NotificationConfig(), notification_config.NotificationConfig(), notification_config.NotificationConfig(), ], next_page_token='abc', ), securitycenter_service.ListNotificationConfigsResponse( notification_configs=[], next_page_token='def', ), securitycenter_service.ListNotificationConfigsResponse( notification_configs=[ notification_config.NotificationConfig(), ], next_page_token='ghi', ), securitycenter_service.ListNotificationConfigsResponse( notification_configs=[ notification_config.NotificationConfig(), notification_config.NotificationConfig(), ], ), RuntimeError, ) pages = list(client.list_notification_configs(request={}).pages) for page_, token in zip(pages, ['abc','def','ghi', '']): assert page_.raw_page.next_page_token == token @pytest.mark.asyncio async def test_list_notification_configs_async_pager(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_notification_configs), '__call__', new_callable=mock.AsyncMock) as call: # Set the response to a series of pages. 
call.side_effect = ( securitycenter_service.ListNotificationConfigsResponse( notification_configs=[ notification_config.NotificationConfig(), notification_config.NotificationConfig(), notification_config.NotificationConfig(), ], next_page_token='abc', ), securitycenter_service.ListNotificationConfigsResponse( notification_configs=[], next_page_token='def', ), securitycenter_service.ListNotificationConfigsResponse( notification_configs=[ notification_config.NotificationConfig(), ], next_page_token='ghi', ), securitycenter_service.ListNotificationConfigsResponse( notification_configs=[ notification_config.NotificationConfig(), notification_config.NotificationConfig(), ], ), RuntimeError, ) async_pager = await client.list_notification_configs(request={},) assert async_pager.next_page_token == 'abc' responses = [] async for response in async_pager: responses.append(response) assert len(responses) == 6 assert all(isinstance(i, notification_config.NotificationConfig) for i in responses) @pytest.mark.asyncio async def test_list_notification_configs_async_pages(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_notification_configs), '__call__', new_callable=mock.AsyncMock) as call: # Set the response to a series of pages. call.side_effect = ( securitycenter_service.ListNotificationConfigsResponse( notification_configs=[ notification_config.NotificationConfig(), notification_config.NotificationConfig(), notification_config.NotificationConfig(), ], next_page_token='abc', ), securitycenter_service.ListNotificationConfigsResponse( notification_configs=[], next_page_token='def', ), securitycenter_service.ListNotificationConfigsResponse( notification_configs=[ notification_config.NotificationConfig(), ], next_page_token='ghi', ), securitycenter_service.ListNotificationConfigsResponse( notification_configs=[ notification_config.NotificationConfig(), notification_config.NotificationConfig(), ], ), RuntimeError, ) pages = [] async for page_ in (await client.list_notification_configs(request={})).pages: pages.append(page_) for page_, token in zip(pages, ['abc','def','ghi', '']): assert page_.raw_page.next_page_token == token def test_list_sources(transport: str = 'grpc', request_type=securitycenter_service.ListSourcesRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_sources), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = securitycenter_service.ListSourcesResponse( next_page_token='next_page_token_value', ) response = client.list_sources(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.ListSourcesRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, pagers.ListSourcesPager) assert response.next_page_token == 'next_page_token_value' def test_list_sources_from_dict(): test_list_sources(request_type=dict) def test_list_sources_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_sources), '__call__') as call: client.list_sources() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.ListSourcesRequest() @pytest.mark.asyncio async def test_list_sources_async(transport: str = 'grpc_asyncio', request_type=securitycenter_service.ListSourcesRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_sources), '__call__') as call: # Designate an appropriate return value for the call. call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(securitycenter_service.ListSourcesResponse( next_page_token='next_page_token_value', )) response = await client.list_sources(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.ListSourcesRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListSourcesAsyncPager) assert response.next_page_token == 'next_page_token_value' @pytest.mark.asyncio async def test_list_sources_async_from_dict(): await test_list_sources_async(request_type=dict) def test_list_sources_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.ListSourcesRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_sources), '__call__') as call: call.return_value = securitycenter_service.ListSourcesResponse() client.list_sources(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_list_sources_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.ListSourcesRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( type(client.transport.list_sources), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(securitycenter_service.ListSourcesResponse()) await client.list_sources(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] def test_list_sources_flattened(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_sources), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = securitycenter_service.ListSourcesResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.list_sources( parent='parent_value', ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].parent == 'parent_value' def test_list_sources_flattened_error(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.list_sources( securitycenter_service.ListSourcesRequest(), parent='parent_value', ) @pytest.mark.asyncio async def test_list_sources_flattened_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_sources), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = securitycenter_service.ListSourcesResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(securitycenter_service.ListSourcesResponse()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.list_sources( parent='parent_value', ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].parent == 'parent_value' @pytest.mark.asyncio async def test_list_sources_flattened_error_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.list_sources( securitycenter_service.ListSourcesRequest(), parent='parent_value', ) def test_list_sources_pager(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_sources), '__call__') as call: # Set the response to a series of pages. 
call.side_effect = ( securitycenter_service.ListSourcesResponse( sources=[ source.Source(), source.Source(), source.Source(), ], next_page_token='abc', ), securitycenter_service.ListSourcesResponse( sources=[], next_page_token='def', ), securitycenter_service.ListSourcesResponse( sources=[ source.Source(), ], next_page_token='ghi', ), securitycenter_service.ListSourcesResponse( sources=[ source.Source(), source.Source(), ], ), RuntimeError, ) metadata = () metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ('parent', ''), )), ) pager = client.list_sources(request={}) assert pager._metadata == metadata results = [i for i in pager] assert len(results) == 6 assert all(isinstance(i, source.Source) for i in results) def test_list_sources_pages(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_sources), '__call__') as call: # Set the response to a series of pages. call.side_effect = ( securitycenter_service.ListSourcesResponse( sources=[ source.Source(), source.Source(), source.Source(), ], next_page_token='abc', ), securitycenter_service.ListSourcesResponse( sources=[], next_page_token='def', ), securitycenter_service.ListSourcesResponse( sources=[ source.Source(), ], next_page_token='ghi', ), securitycenter_service.ListSourcesResponse( sources=[ source.Source(), source.Source(), ], ), RuntimeError, ) pages = list(client.list_sources(request={}).pages) for page_, token in zip(pages, ['abc','def','ghi', '']): assert page_.raw_page.next_page_token == token @pytest.mark.asyncio async def test_list_sources_async_pager(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_sources), '__call__', new_callable=mock.AsyncMock) as call: # Set the response to a series of pages. call.side_effect = ( securitycenter_service.ListSourcesResponse( sources=[ source.Source(), source.Source(), source.Source(), ], next_page_token='abc', ), securitycenter_service.ListSourcesResponse( sources=[], next_page_token='def', ), securitycenter_service.ListSourcesResponse( sources=[ source.Source(), ], next_page_token='ghi', ), securitycenter_service.ListSourcesResponse( sources=[ source.Source(), source.Source(), ], ), RuntimeError, ) async_pager = await client.list_sources(request={},) assert async_pager.next_page_token == 'abc' responses = [] async for response in async_pager: responses.append(response) assert len(responses) == 6 assert all(isinstance(i, source.Source) for i in responses) @pytest.mark.asyncio async def test_list_sources_async_pages(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_sources), '__call__', new_callable=mock.AsyncMock) as call: # Set the response to a series of pages. 
call.side_effect = ( securitycenter_service.ListSourcesResponse( sources=[ source.Source(), source.Source(), source.Source(), ], next_page_token='abc', ), securitycenter_service.ListSourcesResponse( sources=[], next_page_token='def', ), securitycenter_service.ListSourcesResponse( sources=[ source.Source(), ], next_page_token='ghi', ), securitycenter_service.ListSourcesResponse( sources=[ source.Source(), source.Source(), ], ), RuntimeError, ) pages = [] async for page_ in (await client.list_sources(request={})).pages: pages.append(page_) for page_, token in zip(pages, ['abc','def','ghi', '']): assert page_.raw_page.next_page_token == token def test_run_asset_discovery(transport: str = 'grpc', request_type=securitycenter_service.RunAssetDiscoveryRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.run_asset_discovery), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name='operations/spam') response = client.run_asset_discovery(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.RunAssetDiscoveryRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) def test_run_asset_discovery_from_dict(): test_run_asset_discovery(request_type=dict) def test_run_asset_discovery_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.run_asset_discovery), '__call__') as call: client.run_asset_discovery() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.RunAssetDiscoveryRequest() @pytest.mark.asyncio async def test_run_asset_discovery_async(transport: str = 'grpc_asyncio', request_type=securitycenter_service.RunAssetDiscoveryRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.run_asset_discovery), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name='operations/spam') ) response = await client.run_asset_discovery(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.RunAssetDiscoveryRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, future.Future) @pytest.mark.asyncio async def test_run_asset_discovery_async_from_dict(): await test_run_asset_discovery_async(request_type=dict) def test_run_asset_discovery_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.RunAssetDiscoveryRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.run_asset_discovery), '__call__') as call: call.return_value = operations_pb2.Operation(name='operations/op') client.run_asset_discovery(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_run_asset_discovery_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.RunAssetDiscoveryRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.run_asset_discovery), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) await client.run_asset_discovery(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] def test_run_asset_discovery_flattened(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.run_asset_discovery), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name='operations/op') # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.run_asset_discovery( parent='parent_value', ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].parent == 'parent_value' def test_run_asset_discovery_flattened_error(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.run_asset_discovery( securitycenter_service.RunAssetDiscoveryRequest(), parent='parent_value', ) @pytest.mark.asyncio async def test_run_asset_discovery_flattened_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.run_asset_discovery), '__call__') as call: # Designate an appropriate return value for the call. 
call.return_value = operations_pb2.Operation(name='operations/op') call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name='operations/spam') ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.run_asset_discovery( parent='parent_value', ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].parent == 'parent_value' @pytest.mark.asyncio async def test_run_asset_discovery_flattened_error_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.run_asset_discovery( securitycenter_service.RunAssetDiscoveryRequest(), parent='parent_value', ) def test_set_finding_state(transport: str = 'grpc', request_type=securitycenter_service.SetFindingStateRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.set_finding_state), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = finding.Finding( name='name_value', parent='parent_value', resource_name='resource_name_value', state=finding.Finding.State.ACTIVE, category='category_value', external_uri='external_uri_value', severity=finding.Finding.Severity.CRITICAL, canonical_name='canonical_name_value', ) response = client.set_finding_state(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.SetFindingStateRequest() # Establish that the response is the type that we expect. assert isinstance(response, finding.Finding) assert response.name == 'name_value' assert response.parent == 'parent_value' assert response.resource_name == 'resource_name_value' assert response.state == finding.Finding.State.ACTIVE assert response.category == 'category_value' assert response.external_uri == 'external_uri_value' assert response.severity == finding.Finding.Severity.CRITICAL assert response.canonical_name == 'canonical_name_value' def test_set_finding_state_from_dict(): test_set_finding_state(request_type=dict) def test_set_finding_state_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( type(client.transport.set_finding_state), '__call__') as call: client.set_finding_state() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.SetFindingStateRequest() @pytest.mark.asyncio async def test_set_finding_state_async(transport: str = 'grpc_asyncio', request_type=securitycenter_service.SetFindingStateRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.set_finding_state), '__call__') as call: # Designate an appropriate return value for the call. call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(finding.Finding( name='name_value', parent='parent_value', resource_name='resource_name_value', state=finding.Finding.State.ACTIVE, category='category_value', external_uri='external_uri_value', severity=finding.Finding.Severity.CRITICAL, canonical_name='canonical_name_value', )) response = await client.set_finding_state(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.SetFindingStateRequest() # Establish that the response is the type that we expect. assert isinstance(response, finding.Finding) assert response.name == 'name_value' assert response.parent == 'parent_value' assert response.resource_name == 'resource_name_value' assert response.state == finding.Finding.State.ACTIVE assert response.category == 'category_value' assert response.external_uri == 'external_uri_value' assert response.severity == finding.Finding.Severity.CRITICAL assert response.canonical_name == 'canonical_name_value' @pytest.mark.asyncio async def test_set_finding_state_async_from_dict(): await test_set_finding_state_async(request_type=dict) def test_set_finding_state_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.SetFindingStateRequest() request.name = 'name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.set_finding_state), '__call__') as call: call.return_value = finding.Finding() client.set_finding_state(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'name=name/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_set_finding_state_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.SetFindingStateRequest() request.name = 'name/value' # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( type(client.transport.set_finding_state), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(finding.Finding()) await client.set_finding_state(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'name=name/value', ) in kw['metadata'] def test_set_finding_state_flattened(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.set_finding_state), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = finding.Finding() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.set_finding_state( name='name_value', state=finding.Finding.State.ACTIVE, start_time=timestamp_pb2.Timestamp(seconds=751), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].name == 'name_value' assert args[0].state == finding.Finding.State.ACTIVE assert TimestampRule().to_proto(args[0].start_time) == timestamp_pb2.Timestamp(seconds=751) def test_set_finding_state_flattened_error(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.set_finding_state( securitycenter_service.SetFindingStateRequest(), name='name_value', state=finding.Finding.State.ACTIVE, start_time=timestamp_pb2.Timestamp(seconds=751), ) @pytest.mark.asyncio async def test_set_finding_state_flattened_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.set_finding_state), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = finding.Finding() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(finding.Finding()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.set_finding_state( name='name_value', state=finding.Finding.State.ACTIVE, start_time=timestamp_pb2.Timestamp(seconds=751), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].name == 'name_value' assert args[0].state == finding.Finding.State.ACTIVE assert TimestampRule().to_proto(args[0].start_time) == timestamp_pb2.Timestamp(seconds=751) @pytest.mark.asyncio async def test_set_finding_state_flattened_error_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): await client.set_finding_state( securitycenter_service.SetFindingStateRequest(), name='name_value', state=finding.Finding.State.ACTIVE, start_time=timestamp_pb2.Timestamp(seconds=751), ) def test_set_iam_policy(transport: str = 'grpc', request_type=iam_policy_pb2.SetIamPolicyRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.set_iam_policy), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = policy_pb2.Policy( version=774, etag=b'etag_blob', ) response = client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == iam_policy_pb2.SetIamPolicyRequest() # Establish that the response is the type that we expect. assert isinstance(response, policy_pb2.Policy) assert response.version == 774 assert response.etag == b'etag_blob' def test_set_iam_policy_from_dict(): test_set_iam_policy(request_type=dict) def test_set_iam_policy_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.set_iam_policy), '__call__') as call: client.set_iam_policy() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == iam_policy_pb2.SetIamPolicyRequest() @pytest.mark.asyncio async def test_set_iam_policy_async(transport: str = 'grpc_asyncio', request_type=iam_policy_pb2.SetIamPolicyRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.set_iam_policy), '__call__') as call: # Designate an appropriate return value for the call. call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy( version=774, etag=b'etag_blob', )) response = await client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == iam_policy_pb2.SetIamPolicyRequest() # Establish that the response is the type that we expect. assert isinstance(response, policy_pb2.Policy) assert response.version == 774 assert response.etag == b'etag_blob' @pytest.mark.asyncio async def test_set_iam_policy_async_from_dict(): await test_set_iam_policy_async(request_type=dict) def test_set_iam_policy_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = iam_policy_pb2.SetIamPolicyRequest() request.resource = 'resource/value' # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( type(client.transport.set_iam_policy), '__call__') as call: call.return_value = policy_pb2.Policy() client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'resource=resource/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_set_iam_policy_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = iam_policy_pb2.SetIamPolicyRequest() request.resource = 'resource/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.set_iam_policy), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) await client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'resource=resource/value', ) in kw['metadata'] def test_set_iam_policy_from_dict_foreign(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.set_iam_policy), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = policy_pb2.Policy() response = client.set_iam_policy(request={ 'resource': 'resource_value', 'policy': policy_pb2.Policy(version=774), } ) call.assert_called() def test_set_iam_policy_flattened(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.set_iam_policy), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = policy_pb2.Policy() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.set_iam_policy( resource='resource_value', ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].resource == 'resource_value' def test_set_iam_policy_flattened_error(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.set_iam_policy( iam_policy_pb2.SetIamPolicyRequest(), resource='resource_value', ) @pytest.mark.asyncio async def test_set_iam_policy_flattened_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.set_iam_policy), '__call__') as call: # Designate an appropriate return value for the call. 
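        # The plain return value below is immediately overridden with a
        # FakeUnaryUnaryCall wrapper so that awaiting the mocked stub yields
        # the designated response.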
call.return_value = policy_pb2.Policy() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.set_iam_policy( resource='resource_value', ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].resource == 'resource_value' @pytest.mark.asyncio async def test_set_iam_policy_flattened_error_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.set_iam_policy( iam_policy_pb2.SetIamPolicyRequest(), resource='resource_value', ) def test_test_iam_permissions(transport: str = 'grpc', request_type=iam_policy_pb2.TestIamPermissionsRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.test_iam_permissions), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = iam_policy_pb2.TestIamPermissionsResponse( permissions=['permissions_value'], ) response = client.test_iam_permissions(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() # Establish that the response is the type that we expect. assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) assert response.permissions == ['permissions_value'] def test_test_iam_permissions_from_dict(): test_test_iam_permissions(request_type=dict) def test_test_iam_permissions_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.test_iam_permissions), '__call__') as call: client.test_iam_permissions() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() @pytest.mark.asyncio async def test_test_iam_permissions_async(transport: str = 'grpc_asyncio', request_type=iam_policy_pb2.TestIamPermissionsRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.test_iam_permissions), '__call__') as call: # Designate an appropriate return value for the call. call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(iam_policy_pb2.TestIamPermissionsResponse( permissions=['permissions_value'], )) response = await client.test_iam_permissions(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() # Establish that the response is the type that we expect. assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) assert response.permissions == ['permissions_value'] @pytest.mark.asyncio async def test_test_iam_permissions_async_from_dict(): await test_test_iam_permissions_async(request_type=dict) def test_test_iam_permissions_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = iam_policy_pb2.TestIamPermissionsRequest() request.resource = 'resource/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.test_iam_permissions), '__call__') as call: call.return_value = iam_policy_pb2.TestIamPermissionsResponse() client.test_iam_permissions(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'resource=resource/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_test_iam_permissions_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = iam_policy_pb2.TestIamPermissionsRequest() request.resource = 'resource/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.test_iam_permissions), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(iam_policy_pb2.TestIamPermissionsResponse()) await client.test_iam_permissions(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'resource=resource/value', ) in kw['metadata'] def test_test_iam_permissions_from_dict_foreign(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.test_iam_permissions), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = iam_policy_pb2.TestIamPermissionsResponse() response = client.test_iam_permissions(request={ 'resource': 'resource_value', 'permissions': ['permissions_value'], } ) call.assert_called() def test_test_iam_permissions_flattened(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.test_iam_permissions), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = iam_policy_pb2.TestIamPermissionsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.test_iam_permissions( resource='resource_value', permissions=['permissions_value'], ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].resource == 'resource_value' assert args[0].permissions == ['permissions_value'] def test_test_iam_permissions_flattened_error(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.test_iam_permissions( iam_policy_pb2.TestIamPermissionsRequest(), resource='resource_value', permissions=['permissions_value'], ) @pytest.mark.asyncio async def test_test_iam_permissions_flattened_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.test_iam_permissions), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = iam_policy_pb2.TestIamPermissionsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(iam_policy_pb2.TestIamPermissionsResponse()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.test_iam_permissions( resource='resource_value', permissions=['permissions_value'], ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].resource == 'resource_value' assert args[0].permissions == ['permissions_value'] @pytest.mark.asyncio async def test_test_iam_permissions_flattened_error_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.test_iam_permissions( iam_policy_pb2.TestIamPermissionsRequest(), resource='resource_value', permissions=['permissions_value'], ) def test_update_finding(transport: str = 'grpc', request_type=securitycenter_service.UpdateFindingRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_finding), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_finding.Finding( name='name_value', parent='parent_value', resource_name='resource_name_value', state=gcs_finding.Finding.State.ACTIVE, category='category_value', external_uri='external_uri_value', severity=gcs_finding.Finding.Severity.CRITICAL, canonical_name='canonical_name_value', ) response = client.update_finding(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.UpdateFindingRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, gcs_finding.Finding) assert response.name == 'name_value' assert response.parent == 'parent_value' assert response.resource_name == 'resource_name_value' assert response.state == gcs_finding.Finding.State.ACTIVE assert response.category == 'category_value' assert response.external_uri == 'external_uri_value' assert response.severity == gcs_finding.Finding.Severity.CRITICAL assert response.canonical_name == 'canonical_name_value' def test_update_finding_from_dict(): test_update_finding(request_type=dict) def test_update_finding_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_finding), '__call__') as call: client.update_finding() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.UpdateFindingRequest() @pytest.mark.asyncio async def test_update_finding_async(transport: str = 'grpc_asyncio', request_type=securitycenter_service.UpdateFindingRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_finding), '__call__') as call: # Designate an appropriate return value for the call. call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gcs_finding.Finding( name='name_value', parent='parent_value', resource_name='resource_name_value', state=gcs_finding.Finding.State.ACTIVE, category='category_value', external_uri='external_uri_value', severity=gcs_finding.Finding.Severity.CRITICAL, canonical_name='canonical_name_value', )) response = await client.update_finding(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.UpdateFindingRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcs_finding.Finding) assert response.name == 'name_value' assert response.parent == 'parent_value' assert response.resource_name == 'resource_name_value' assert response.state == gcs_finding.Finding.State.ACTIVE assert response.category == 'category_value' assert response.external_uri == 'external_uri_value' assert response.severity == gcs_finding.Finding.Severity.CRITICAL assert response.canonical_name == 'canonical_name_value' @pytest.mark.asyncio async def test_update_finding_async_from_dict(): await test_update_finding_async(request_type=dict) def test_update_finding_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.UpdateFindingRequest() request.finding.name = 'finding.name/value' # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( type(client.transport.update_finding), '__call__') as call: call.return_value = gcs_finding.Finding() client.update_finding(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'finding.name=finding.name/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_update_finding_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.UpdateFindingRequest() request.finding.name = 'finding.name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_finding), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_finding.Finding()) await client.update_finding(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'finding.name=finding.name/value', ) in kw['metadata'] def test_update_finding_flattened(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_finding), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_finding.Finding() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.update_finding( finding=gcs_finding.Finding(name='name_value'), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].finding == gcs_finding.Finding(name='name_value') def test_update_finding_flattened_error(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.update_finding( securitycenter_service.UpdateFindingRequest(), finding=gcs_finding.Finding(name='name_value'), ) @pytest.mark.asyncio async def test_update_finding_flattened_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_finding), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_finding.Finding() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_finding.Finding()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.update_finding( finding=gcs_finding.Finding(name='name_value'), ) # Establish that the underlying call was made with the expected # request object values. 
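        # The flattened `finding` keyword should have been copied onto the
        # request object captured by the mock.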
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].finding == gcs_finding.Finding(name='name_value') @pytest.mark.asyncio async def test_update_finding_flattened_error_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.update_finding( securitycenter_service.UpdateFindingRequest(), finding=gcs_finding.Finding(name='name_value'), ) def test_update_notification_config(transport: str = 'grpc', request_type=securitycenter_service.UpdateNotificationConfigRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_notification_config), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_notification_config.NotificationConfig( name='name_value', description='description_value', pubsub_topic='pubsub_topic_value', service_account='service_account_value', streaming_config=gcs_notification_config.NotificationConfig.StreamingConfig(filter='filter_value'), ) response = client.update_notification_config(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.UpdateNotificationConfigRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcs_notification_config.NotificationConfig) assert response.name == 'name_value' assert response.description == 'description_value' assert response.pubsub_topic == 'pubsub_topic_value' assert response.service_account == 'service_account_value' def test_update_notification_config_from_dict(): test_update_notification_config(request_type=dict) def test_update_notification_config_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_notification_config), '__call__') as call: client.update_notification_config() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.UpdateNotificationConfigRequest() @pytest.mark.asyncio async def test_update_notification_config_async(transport: str = 'grpc_asyncio', request_type=securitycenter_service.UpdateNotificationConfigRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_notification_config), '__call__') as call: # Designate an appropriate return value for the call. 
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gcs_notification_config.NotificationConfig( name='name_value', description='description_value', pubsub_topic='pubsub_topic_value', service_account='service_account_value', )) response = await client.update_notification_config(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.UpdateNotificationConfigRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcs_notification_config.NotificationConfig) assert response.name == 'name_value' assert response.description == 'description_value' assert response.pubsub_topic == 'pubsub_topic_value' assert response.service_account == 'service_account_value' @pytest.mark.asyncio async def test_update_notification_config_async_from_dict(): await test_update_notification_config_async(request_type=dict) def test_update_notification_config_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.UpdateNotificationConfigRequest() request.notification_config.name = 'notification_config.name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_notification_config), '__call__') as call: call.return_value = gcs_notification_config.NotificationConfig() client.update_notification_config(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'notification_config.name=notification_config.name/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_update_notification_config_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.UpdateNotificationConfigRequest() request.notification_config.name = 'notification_config.name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_notification_config), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_notification_config.NotificationConfig()) await client.update_notification_config(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'notification_config.name=notification_config.name/value', ) in kw['metadata'] def test_update_notification_config_flattened(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_notification_config), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_notification_config.NotificationConfig() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
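        # Both the notification config and the update mask are flattened here,
        # so the assertions below check both fields on the captured request.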
client.update_notification_config( notification_config=gcs_notification_config.NotificationConfig(name='name_value'), update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].notification_config == gcs_notification_config.NotificationConfig(name='name_value') assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) def test_update_notification_config_flattened_error(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.update_notification_config( securitycenter_service.UpdateNotificationConfigRequest(), notification_config=gcs_notification_config.NotificationConfig(name='name_value'), update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), ) @pytest.mark.asyncio async def test_update_notification_config_flattened_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_notification_config), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_notification_config.NotificationConfig() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_notification_config.NotificationConfig()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.update_notification_config( notification_config=gcs_notification_config.NotificationConfig(name='name_value'), update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].notification_config == gcs_notification_config.NotificationConfig(name='name_value') assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) @pytest.mark.asyncio async def test_update_notification_config_flattened_error_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.update_notification_config( securitycenter_service.UpdateNotificationConfigRequest(), notification_config=gcs_notification_config.NotificationConfig(name='name_value'), update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), ) def test_update_organization_settings(transport: str = 'grpc', request_type=securitycenter_service.UpdateOrganizationSettingsRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_organization_settings), '__call__') as call: # Designate an appropriate return value for the call. 
call.return_value = gcs_organization_settings.OrganizationSettings( name='name_value', enable_asset_discovery=True, ) response = client.update_organization_settings(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.UpdateOrganizationSettingsRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcs_organization_settings.OrganizationSettings) assert response.name == 'name_value' assert response.enable_asset_discovery is True def test_update_organization_settings_from_dict(): test_update_organization_settings(request_type=dict) def test_update_organization_settings_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_organization_settings), '__call__') as call: client.update_organization_settings() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.UpdateOrganizationSettingsRequest() @pytest.mark.asyncio async def test_update_organization_settings_async(transport: str = 'grpc_asyncio', request_type=securitycenter_service.UpdateOrganizationSettingsRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_organization_settings), '__call__') as call: # Designate an appropriate return value for the call. call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gcs_organization_settings.OrganizationSettings( name='name_value', enable_asset_discovery=True, )) response = await client.update_organization_settings(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.UpdateOrganizationSettingsRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcs_organization_settings.OrganizationSettings) assert response.name == 'name_value' assert response.enable_asset_discovery is True @pytest.mark.asyncio async def test_update_organization_settings_async_from_dict(): await test_update_organization_settings_async(request_type=dict) def test_update_organization_settings_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.UpdateOrganizationSettingsRequest() request.organization_settings.name = 'organization_settings.name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_organization_settings), '__call__') as call: call.return_value = gcs_organization_settings.OrganizationSettings() client.update_organization_settings(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'organization_settings.name=organization_settings.name/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_update_organization_settings_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.UpdateOrganizationSettingsRequest() request.organization_settings.name = 'organization_settings.name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_organization_settings), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_organization_settings.OrganizationSettings()) await client.update_organization_settings(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'organization_settings.name=organization_settings.name/value', ) in kw['metadata'] def test_update_organization_settings_flattened(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_organization_settings), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_organization_settings.OrganizationSettings() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.update_organization_settings( organization_settings=gcs_organization_settings.OrganizationSettings(name='name_value'), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].organization_settings == gcs_organization_settings.OrganizationSettings(name='name_value') def test_update_organization_settings_flattened_error(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.update_organization_settings( securitycenter_service.UpdateOrganizationSettingsRequest(), organization_settings=gcs_organization_settings.OrganizationSettings(name='name_value'), ) @pytest.mark.asyncio async def test_update_organization_settings_flattened_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_organization_settings), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_organization_settings.OrganizationSettings() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_organization_settings.OrganizationSettings()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
response = await client.update_organization_settings( organization_settings=gcs_organization_settings.OrganizationSettings(name='name_value'), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].organization_settings == gcs_organization_settings.OrganizationSettings(name='name_value') @pytest.mark.asyncio async def test_update_organization_settings_flattened_error_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.update_organization_settings( securitycenter_service.UpdateOrganizationSettingsRequest(), organization_settings=gcs_organization_settings.OrganizationSettings(name='name_value'), ) def test_update_source(transport: str = 'grpc', request_type=securitycenter_service.UpdateSourceRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_source), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_source.Source( name='name_value', display_name='display_name_value', description='description_value', canonical_name='canonical_name_value', ) response = client.update_source(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.UpdateSourceRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcs_source.Source) assert response.name == 'name_value' assert response.display_name == 'display_name_value' assert response.description == 'description_value' assert response.canonical_name == 'canonical_name_value' def test_update_source_from_dict(): test_update_source(request_type=dict) def test_update_source_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_source), '__call__') as call: client.update_source() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.UpdateSourceRequest() @pytest.mark.asyncio async def test_update_source_async(transport: str = 'grpc_asyncio', request_type=securitycenter_service.UpdateSourceRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_source), '__call__') as call: # Designate an appropriate return value for the call. 
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gcs_source.Source( name='name_value', display_name='display_name_value', description='description_value', canonical_name='canonical_name_value', )) response = await client.update_source(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.UpdateSourceRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcs_source.Source) assert response.name == 'name_value' assert response.display_name == 'display_name_value' assert response.description == 'description_value' assert response.canonical_name == 'canonical_name_value' @pytest.mark.asyncio async def test_update_source_async_from_dict(): await test_update_source_async(request_type=dict) def test_update_source_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.UpdateSourceRequest() request.source.name = 'source.name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_source), '__call__') as call: call.return_value = gcs_source.Source() client.update_source(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'source.name=source.name/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_update_source_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.UpdateSourceRequest() request.source.name = 'source.name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_source), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_source.Source()) await client.update_source(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'source.name=source.name/value', ) in kw['metadata'] def test_update_source_flattened(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_source), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_source.Source() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.update_source( source=gcs_source.Source(name='name_value'), ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].source == gcs_source.Source(name='name_value') def test_update_source_flattened_error(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.update_source( securitycenter_service.UpdateSourceRequest(), source=gcs_source.Source(name='name_value'), ) @pytest.mark.asyncio async def test_update_source_flattened_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_source), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_source.Source() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_source.Source()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.update_source( source=gcs_source.Source(name='name_value'), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].source == gcs_source.Source(name='name_value') @pytest.mark.asyncio async def test_update_source_flattened_error_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.update_source( securitycenter_service.UpdateSourceRequest(), source=gcs_source.Source(name='name_value'), ) def test_update_security_marks(transport: str = 'grpc', request_type=securitycenter_service.UpdateSecurityMarksRequest): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_security_marks), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_security_marks.SecurityMarks( name='name_value', canonical_name='canonical_name_value', ) response = client.update_security_marks(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.UpdateSecurityMarksRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcs_security_marks.SecurityMarks) assert response.name == 'name_value' assert response.canonical_name == 'canonical_name_value' def test_update_security_marks_from_dict(): test_update_security_marks(request_type=dict) def test_update_security_marks_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( type(client.transport.update_security_marks), '__call__') as call: client.update_security_marks() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.UpdateSecurityMarksRequest() @pytest.mark.asyncio async def test_update_security_marks_async(transport: str = 'grpc_asyncio', request_type=securitycenter_service.UpdateSecurityMarksRequest): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_security_marks), '__call__') as call: # Designate an appropriate return value for the call. call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gcs_security_marks.SecurityMarks( name='name_value', canonical_name='canonical_name_value', )) response = await client.update_security_marks(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == securitycenter_service.UpdateSecurityMarksRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcs_security_marks.SecurityMarks) assert response.name == 'name_value' assert response.canonical_name == 'canonical_name_value' @pytest.mark.asyncio async def test_update_security_marks_async_from_dict(): await test_update_security_marks_async(request_type=dict) def test_update_security_marks_field_headers(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.UpdateSecurityMarksRequest() request.security_marks.name = 'security_marks.name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_security_marks), '__call__') as call: call.return_value = gcs_security_marks.SecurityMarks() client.update_security_marks(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'security_marks.name=security_marks.name/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_update_security_marks_field_headers_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = securitycenter_service.UpdateSecurityMarksRequest() request.security_marks.name = 'security_marks.name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_security_marks), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_security_marks.SecurityMarks()) await client.update_security_marks(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. 
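    # kw holds the keyword arguments of the recorded call; the routing header
    # is expected to appear among its metadata entries.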
_, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'security_marks.name=security_marks.name/value', ) in kw['metadata'] def test_update_security_marks_flattened(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_security_marks), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_security_marks.SecurityMarks() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.update_security_marks( security_marks=gcs_security_marks.SecurityMarks(name='name_value'), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].security_marks == gcs_security_marks.SecurityMarks(name='name_value') def test_update_security_marks_flattened_error(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.update_security_marks( securitycenter_service.UpdateSecurityMarksRequest(), security_marks=gcs_security_marks.SecurityMarks(name='name_value'), ) @pytest.mark.asyncio async def test_update_security_marks_flattened_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_security_marks), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = gcs_security_marks.SecurityMarks() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_security_marks.SecurityMarks()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.update_security_marks( security_marks=gcs_security_marks.SecurityMarks(name='name_value'), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].security_marks == gcs_security_marks.SecurityMarks(name='name_value') @pytest.mark.asyncio async def test_update_security_marks_flattened_error_async(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.update_security_marks( securitycenter_service.UpdateSecurityMarksRequest(), security_marks=gcs_security_marks.SecurityMarks(name='name_value'), ) def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.SecurityCenterGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # It is an error to provide a credentials file and a transport instance. 
transport = transports.SecurityCenterGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = SecurityCenterClient( client_options={"credentials_file": "credentials.json"}, transport=transport, ) # It is an error to provide scopes and a transport instance. transport = transports.SecurityCenterGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = SecurityCenterClient( client_options={"scopes": ["1", "2"]}, transport=transport, ) def test_transport_instance(): # A client may be instantiated with a custom transport instance. transport = transports.SecurityCenterGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) client = SecurityCenterClient(transport=transport) assert client.transport is transport def test_transport_get_channel(): # A client may be instantiated with a custom transport instance. transport = transports.SecurityCenterGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel transport = transports.SecurityCenterGrpcAsyncIOTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel @pytest.mark.parametrize("transport_class", [ transports.SecurityCenterGrpcTransport, transports.SecurityCenterGrpcAsyncIOTransport, ]) def test_transport_adc(transport_class): # Test default credentials are used if not provided. with mock.patch.object(google.auth, 'default') as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class() adc.assert_called_once() def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), ) assert isinstance( client.transport, transports.SecurityCenterGrpcTransport, ) def test_security_center_base_transport_error(): # Passing both a credentials object and credentials_file should raise an error with pytest.raises(core_exceptions.DuplicateCredentialArgs): transport = transports.SecurityCenterTransport( credentials=ga_credentials.AnonymousCredentials(), credentials_file="credentials.json" ) def test_security_center_base_transport(): # Instantiate the base transport. with mock.patch('google.cloud.securitycenter_v1.services.security_center.transports.SecurityCenterTransport.__init__') as Transport: Transport.return_value = None transport = transports.SecurityCenterTransport( credentials=ga_credentials.AnonymousCredentials(), ) # Every method on the transport should just blindly # raise NotImplementedError. 
methods = ( 'create_source', 'create_finding', 'create_notification_config', 'delete_notification_config', 'get_iam_policy', 'get_notification_config', 'get_organization_settings', 'get_source', 'group_assets', 'group_findings', 'list_assets', 'list_findings', 'list_notification_configs', 'list_sources', 'run_asset_discovery', 'set_finding_state', 'set_iam_policy', 'test_iam_permissions', 'update_finding', 'update_notification_config', 'update_organization_settings', 'update_source', 'update_security_marks', ) for method in methods: with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): transport.operations_client @requires_google_auth_gte_1_25_0 def test_security_center_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.securitycenter_v1.services.security_center.transports.SecurityCenterTransport._prep_wrapped_messages') as Transport: Transport.return_value = None load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.SecurityCenterTransport( credentials_file="credentials.json", quota_project_id="octopus", ) load_creds.assert_called_once_with("credentials.json", scopes=None, default_scopes=( 'https://www.googleapis.com/auth/cloud-platform', ), quota_project_id="octopus", ) @requires_google_auth_lt_1_25_0 def test_security_center_base_transport_with_credentials_file_old_google_auth(): # Instantiate the base transport with a credentials file with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.securitycenter_v1.services.security_center.transports.SecurityCenterTransport._prep_wrapped_messages') as Transport: Transport.return_value = None load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.SecurityCenterTransport( credentials_file="credentials.json", quota_project_id="octopus", ) load_creds.assert_called_once_with("credentials.json", scopes=( 'https://www.googleapis.com/auth/cloud-platform', ), quota_project_id="octopus", ) def test_security_center_base_transport_with_adc(): # Test the default credentials are used if credentials and credentials_file are None. with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.securitycenter_v1.services.security_center.transports.SecurityCenterTransport._prep_wrapped_messages') as Transport: Transport.return_value = None adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.SecurityCenterTransport() adc.assert_called_once() @requires_google_auth_gte_1_25_0 def test_security_center_auth_adc(): # If no credentials are provided, we should use ADC credentials. with mock.patch.object(google.auth, 'default', autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) SecurityCenterClient() adc.assert_called_once_with( scopes=None, default_scopes=( 'https://www.googleapis.com/auth/cloud-platform', ), quota_project_id=None, ) @requires_google_auth_lt_1_25_0 def test_security_center_auth_adc_old_google_auth(): # If no credentials are provided, we should use ADC credentials. 
with mock.patch.object(google.auth, 'default', autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) SecurityCenterClient() adc.assert_called_once_with( scopes=( 'https://www.googleapis.com/auth/cloud-platform',), quota_project_id=None, ) @pytest.mark.parametrize( "transport_class", [ transports.SecurityCenterGrpcTransport, transports.SecurityCenterGrpcAsyncIOTransport, ], ) @requires_google_auth_gte_1_25_0 def test_security_center_transport_auth_adc(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. with mock.patch.object(google.auth, 'default', autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) adc.assert_called_once_with( scopes=["1", "2"], default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), quota_project_id="octopus", ) @pytest.mark.parametrize( "transport_class", [ transports.SecurityCenterGrpcTransport, transports.SecurityCenterGrpcAsyncIOTransport, ], ) @requires_google_auth_lt_1_25_0 def test_security_center_transport_auth_adc_old_google_auth(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class(quota_project_id="octopus") adc.assert_called_once_with(scopes=( 'https://www.googleapis.com/auth/cloud-platform', ), quota_project_id="octopus", ) @pytest.mark.parametrize( "transport_class,grpc_helpers", [ (transports.SecurityCenterGrpcTransport, grpc_helpers), (transports.SecurityCenterGrpcAsyncIOTransport, grpc_helpers_async) ], ) def test_security_center_transport_create_channel(transport_class, grpc_helpers): # If credentials and host are not provided, the transport class should use # ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( grpc_helpers, "create_channel", autospec=True ) as create_channel: creds = ga_credentials.AnonymousCredentials() adc.return_value = (creds, None) transport_class( quota_project_id="octopus", scopes=["1", "2"] ) create_channel.assert_called_with( "securitycenter.googleapis.com:443", credentials=creds, credentials_file=None, quota_project_id="octopus", default_scopes=( 'https://www.googleapis.com/auth/cloud-platform', ), scopes=["1", "2"], default_host="securitycenter.googleapis.com", ssl_credentials=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) @pytest.mark.parametrize("transport_class", [transports.SecurityCenterGrpcTransport, transports.SecurityCenterGrpcAsyncIOTransport]) def test_security_center_grpc_transport_client_cert_source_for_mtls( transport_class ): cred = ga_credentials.AnonymousCredentials() # Check ssl_channel_credentials is used if provided. 
with mock.patch.object(transport_class, "create_channel") as mock_create_channel: mock_ssl_channel_creds = mock.Mock() transport_class( host="squid.clam.whelk", credentials=cred, ssl_channel_credentials=mock_ssl_channel_creds ) mock_create_channel.assert_called_once_with( "squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls # is used. with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: transport_class( credentials=cred, client_cert_source_for_mtls=client_cert_source_callback ) expected_cert, expected_key = client_cert_source_callback() mock_ssl_cred.assert_called_once_with( certificate_chain=expected_cert, private_key=expected_key ) def test_security_center_host_no_port(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions(api_endpoint='securitycenter.googleapis.com'), ) assert client.transport._host == 'securitycenter.googleapis.com:443' def test_security_center_host_with_port(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions(api_endpoint='securitycenter.googleapis.com:8000'), ) assert client.transport._host == 'securitycenter.googleapis.com:8000' def test_security_center_grpc_transport_channel(): channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.SecurityCenterGrpcTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None def test_security_center_grpc_asyncio_transport_channel(): channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.SecurityCenterGrpcAsyncIOTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. 
@pytest.mark.parametrize("transport_class", [transports.SecurityCenterGrpcTransport, transports.SecurityCenterGrpcAsyncIOTransport]) def test_security_center_transport_channel_mtls_with_client_cert_source( transport_class ): with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel cred = ga_credentials.AnonymousCredentials() with pytest.warns(DeprecationWarning): with mock.patch.object(google.auth, 'default') as adc: adc.return_value = (cred, None) transport = transport_class( host="squid.clam.whelk", api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=client_cert_source_callback, ) adc.assert_called_once() grpc_ssl_channel_cred.assert_called_once_with( certificate_chain=b"cert bytes", private_key=b"key bytes" ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel assert transport._ssl_channel_credentials == mock_ssl_cred # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize("transport_class", [transports.SecurityCenterGrpcTransport, transports.SecurityCenterGrpcAsyncIOTransport]) def test_security_center_transport_channel_mtls_with_adc( transport_class ): mock_ssl_cred = mock.Mock() with mock.patch.multiple( "google.auth.transport.grpc.SslCredentials", __init__=mock.Mock(return_value=None), ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel mock_cred = mock.Mock() with pytest.warns(DeprecationWarning): transport = transport_class( host="squid.clam.whelk", credentials=mock_cred, api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=None, ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel def test_security_center_grpc_lro_client(): client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) transport = client.transport # Ensure that we have a api-core operations client. assert isinstance( transport.operations_client, operations_v1.OperationsClient, ) # Ensure that subsequent calls to the property send the exact same object. assert transport.operations_client is transport.operations_client def test_security_center_grpc_lro_async_client(): client = SecurityCenterAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc_asyncio', ) transport = client.transport # Ensure that we have a api-core operations client. assert isinstance( transport.operations_client, operations_v1.OperationsAsyncClient, ) # Ensure that subsequent calls to the property send the exact same object. 
assert transport.operations_client is transport.operations_client def test_asset_path(): organization = "squid" asset = "clam" expected = "organizations/{organization}/assets/{asset}".format(organization=organization, asset=asset, ) actual = SecurityCenterClient.asset_path(organization, asset) assert expected == actual def test_parse_asset_path(): expected = { "organization": "whelk", "asset": "octopus", } path = SecurityCenterClient.asset_path(**expected) # Check that the path construction is reversible. actual = SecurityCenterClient.parse_asset_path(path) assert expected == actual def test_finding_path(): organization = "oyster" source = "nudibranch" finding = "cuttlefish" expected = "organizations/{organization}/sources/{source}/findings/{finding}".format(organization=organization, source=source, finding=finding, ) actual = SecurityCenterClient.finding_path(organization, source, finding) assert expected == actual def test_parse_finding_path(): expected = { "organization": "mussel", "source": "winkle", "finding": "nautilus", } path = SecurityCenterClient.finding_path(**expected) # Check that the path construction is reversible. actual = SecurityCenterClient.parse_finding_path(path) assert expected == actual def test_notification_config_path(): organization = "scallop" notification_config = "abalone" expected = "organizations/{organization}/notificationConfigs/{notification_config}".format(organization=organization, notification_config=notification_config, ) actual = SecurityCenterClient.notification_config_path(organization, notification_config) assert expected == actual def test_parse_notification_config_path(): expected = { "organization": "squid", "notification_config": "clam", } path = SecurityCenterClient.notification_config_path(**expected) # Check that the path construction is reversible. actual = SecurityCenterClient.parse_notification_config_path(path) assert expected == actual def test_organization_settings_path(): organization = "whelk" expected = "organizations/{organization}/organizationSettings".format(organization=organization, ) actual = SecurityCenterClient.organization_settings_path(organization) assert expected == actual def test_parse_organization_settings_path(): expected = { "organization": "octopus", } path = SecurityCenterClient.organization_settings_path(**expected) # Check that the path construction is reversible. actual = SecurityCenterClient.parse_organization_settings_path(path) assert expected == actual def test_security_marks_path(): organization = "oyster" asset = "nudibranch" expected = "organizations/{organization}/assets/{asset}/securityMarks".format(organization=organization, asset=asset, ) actual = SecurityCenterClient.security_marks_path(organization, asset) assert expected == actual def test_parse_security_marks_path(): expected = { "organization": "cuttlefish", "asset": "mussel", } path = SecurityCenterClient.security_marks_path(**expected) # Check that the path construction is reversible. actual = SecurityCenterClient.parse_security_marks_path(path) assert expected == actual def test_source_path(): organization = "winkle" source = "nautilus" expected = "organizations/{organization}/sources/{source}".format(organization=organization, source=source, ) actual = SecurityCenterClient.source_path(organization, source) assert expected == actual def test_parse_source_path(): expected = { "organization": "scallop", "source": "abalone", } path = SecurityCenterClient.source_path(**expected) # Check that the path construction is reversible. 
actual = SecurityCenterClient.parse_source_path(path) assert expected == actual def test_topic_path(): project = "squid" topic = "clam" expected = "projects/{project}/topics/{topic}".format(project=project, topic=topic, ) actual = SecurityCenterClient.topic_path(project, topic) assert expected == actual def test_parse_topic_path(): expected = { "project": "whelk", "topic": "octopus", } path = SecurityCenterClient.topic_path(**expected) # Check that the path construction is reversible. actual = SecurityCenterClient.parse_topic_path(path) assert expected == actual def test_common_billing_account_path(): billing_account = "oyster" expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) actual = SecurityCenterClient.common_billing_account_path(billing_account) assert expected == actual def test_parse_common_billing_account_path(): expected = { "billing_account": "nudibranch", } path = SecurityCenterClient.common_billing_account_path(**expected) # Check that the path construction is reversible. actual = SecurityCenterClient.parse_common_billing_account_path(path) assert expected == actual def test_common_folder_path(): folder = "cuttlefish" expected = "folders/{folder}".format(folder=folder, ) actual = SecurityCenterClient.common_folder_path(folder) assert expected == actual def test_parse_common_folder_path(): expected = { "folder": "mussel", } path = SecurityCenterClient.common_folder_path(**expected) # Check that the path construction is reversible. actual = SecurityCenterClient.parse_common_folder_path(path) assert expected == actual def test_common_organization_path(): organization = "winkle" expected = "organizations/{organization}".format(organization=organization, ) actual = SecurityCenterClient.common_organization_path(organization) assert expected == actual def test_parse_common_organization_path(): expected = { "organization": "nautilus", } path = SecurityCenterClient.common_organization_path(**expected) # Check that the path construction is reversible. actual = SecurityCenterClient.parse_common_organization_path(path) assert expected == actual def test_common_project_path(): project = "scallop" expected = "projects/{project}".format(project=project, ) actual = SecurityCenterClient.common_project_path(project) assert expected == actual def test_parse_common_project_path(): expected = { "project": "abalone", } path = SecurityCenterClient.common_project_path(**expected) # Check that the path construction is reversible. actual = SecurityCenterClient.parse_common_project_path(path) assert expected == actual def test_common_location_path(): project = "squid" location = "clam" expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) actual = SecurityCenterClient.common_location_path(project, location) assert expected == actual def test_parse_common_location_path(): expected = { "project": "whelk", "location": "octopus", } path = SecurityCenterClient.common_location_path(**expected) # Check that the path construction is reversible. 
actual = SecurityCenterClient.parse_common_location_path(path) assert expected == actual def test_client_withDEFAULT_CLIENT_INFO(): client_info = gapic_v1.client_info.ClientInfo() with mock.patch.object(transports.SecurityCenterTransport, '_prep_wrapped_messages') as prep: client = SecurityCenterClient( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) with mock.patch.object(transports.SecurityCenterTransport, '_prep_wrapped_messages') as prep: transport_class = SecurityCenterClient.get_transport_class() transport = transport_class( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info)
apache-2.0
2,007,172,058,778,192,100
37.301379
241
0.652636
false
rwisecar/data-structures
src/dll.py
1
3472
"""Create a Doubly linked list.""" class Node(object): """Create node to push into Doubly link list.""" def __init__(self, value=None, next_node=None, previous_node=None): """Create node to push into Doubly link list.""" self.value = value self.next_node = next_node self.previous_node = previous_node class DoubleLink(object): """Create a doubly linked list.""" def __init__(self, head=None, iterable=None): """Create an instance of a doubly linked list.""" self._length = 0 self.head = None self.tail = None if iterable and hasattr(iterable, "__iter__"): for value in iterable: self.push(value) elif iterable: raise TypeError elif head and not iterable: self.push(head) def push(self, value): """Add new value to the front of a doubly linked list.""" new_node = Node(value) if self.head: new_node.next_node = self.head self.head.previous_node = new_node self.head = new_node else: self.head = new_node self.tail = new_node self._length += 1 def append(self, value): """Add a new value to the end of a doubly linked list.""" new_node = Node(value) if self.tail: self.tail.next_node = new_node new_node.previous_node = self.tail self.tail = new_node else: self.head = new_node self.tail = new_node self._length += 1 def pop(self): """Remove and return head of a dll.""" if self.head is None: raise IndexError("Cannot pop from an empty list.") next_node = self.head.next_node old_head = self.head if next_node: next_node.previous_node = None self.head = next_node self._length = self._length - 1 if self._length < 1: self.tail = None return old_head.value def shift(self): """Remove and return the tail of a dll.""" if self.tail is None: raise IndexError("Cannot shift from an empty list.") prev_node = self.tail.previous_node old_tail = self.tail if prev_node: self.tail.next_node = None self.tail = prev_node self._length = self._length - 1 if self._length < 1: self.head = None return old_tail.value def remove(self, val): """Input a value and remove a node with that value from the list.""" try: current_value = self.head.value current_node = self.head while current_node: if current_node.value == val: if current_value is self.head.value: self.pop() elif current_value is self.tail.value: self.shift() else: current_node.previous_node.next_node = current_node.next_node current_node.next_node.previous_node = current_node.previous_node self._length -= 1 break else: current_value = current_node.next_node.value current_node = current_node.next_node except AttributeError: raise ValueError('{} not in list.'.format(val))
mit
-5,819,471,569,239,234,000
32.708738
89
0.528226
false
evrenesat/genesis
genesis/com/views.py
1
3447
# -*- coding: utf-8 -*- from decimal import Decimal from django.contrib.auth.decorators import login_required from django.db import connection from django.http import HttpResponse from django.http import JsonResponse from django.shortcuts import render from django.utils.translation import ugettext_lazy as _ # Create your views here. from com.models import Invoice, Payment from com.num2text import num2text from lab.models import Admission # from num2words.lang_TR import Num2Word_TR as N2W TAX_RATE = Decimal(8) def add_tax_to(admission, invoice): pricing = admission.admissionpricing invoice.amount = pricing.list_price if pricing.tax_included: invoice.total = pricing.final_amount invoice.subtotal = (pricing.final_amount * Decimal(100)) / (Decimal(100) + TAX_RATE) else: # if pricing.final_amount != pricing.list_price: # pre-tax discount applied invoice.subtotal = pricing.final_amount invoice.total = (invoice.subtotal * (100 + TAX_RATE)) / 100 payment = Payment.objects.get(admission=admission, method=10, type=10) payment.amount = -invoice.total payment.save() invoice.tax = invoice.total - invoice.subtotal invoice.discount = invoice.subtotal - invoice.amount invoice.save() def prepare_invoice_for(admission): invoice = None error = None if not admission.patient.address: error = _('Customer address is missing') if error is None: invoice_set = list(admission.invoice_set.all()) if invoice_set: invoice = invoice_set[0] add_tax_to(admission, invoice) else: invoice = Invoice(name=admission.patient.full_name(50), address=admission.patient.address) add_tax_to(admission, invoice) invoice.admission.add(admission) for item in admission.invoiceitem_set.all(): if not item.invoice: item.invoice = invoice item.save() return invoice, error @login_required def invoice_id_of_admission(request, pk): admission = Admission.objects.get(pk=pk) invoice_set = list(admission.invoice_set.all()) if invoice_set: invoice_id = invoice_set[0].id else: invoice_id = 0 return JsonResponse({'id': invoice_id}) @login_required def print_invoice_by_admission(request, pk): admission = Admission.objects.get(pk=pk) invoice, error = prepare_invoice_for(admission) return print_invoice(invoice, request, error) @login_required def print_invoice_by_id(request, pk): invoice = Invoice.objects.get(pk=pk) return print_invoice(invoice, request) @login_required def next_invoice_id(request): c = connection.cursor() c.execute("select nextval('com_invoice_id_seq');") next_invoice_id = c.fetchone()[0] return JsonResponse({'id': next_invoice_id}) def print_invoice(invoice, request, error=None): if error: return render(request, 'invoice.html', {'error': error}) integ, decim = str(invoice.total).split('.') text_total_int = num2text(int(integ)) text_total_decimal = num2text(int(decim)) return render(request, 'invoice.html', { 'error': error, # 'admission': admission, 'items': invoice.invoiceitem_set.all(), 'invoice': invoice, 'text_total_int': text_total_int, 'text_total_decimal': text_total_decimal, })
gpl-3.0
227,998,289,184,827,260
31.828571
92
0.664056
false
davidbroadwater/nyc-subway-datascience-project
project_2/fix_turnstile_data/fix_turnstile_data.py
1
2150
import csv import pandas as pd def fix_turnstile_data(filenames): ''' Filenames is a list of MTA Subway turnstile text files. A link to an example MTA Subway turnstile text file can be seen at the URL below: http://web.mta.info/developers/data/nyct/turnstile/turnstile_110507.txt As you can see, there are numerous data points included in each row of the a MTA Subway turnstile text file. You want to write a function that will update each row in the text file so there is only one entry per row. A few examples below: A002,R051,02-00-00,05-28-11,00:00:00,REGULAR,003178521,001100739 A002,R051,02-00-00,05-28-11,04:00:00,REGULAR,003178541,001100746 A002,R051,02-00-00,05-28-11,08:00:00,REGULAR,003178559,001100775 Write the updates to a different text file in the format of "updated_" + filename. For example: 1) if you read in a text file called "turnstile_110521.txt" 2) you should write the updated data to "updated_turnstile_110521.txt" The order of the fields should be preserved. You can see a sample of the turnstile text file that's passed into this function and the the corresponding updated file in the links below: Sample input file: https://www.dropbox.com/s/mpin5zv4hgrx244/turnstile_110528.txt Sample updated file: https://www.dropbox.com/s/074xbgio4c39b7h/solution_turnstile_110528.txt ''' for name in filenames: with open('updated_'+name, 'wb') as w: writer = csv.writer(w) with open(name, 'rb') as f: reader = csv.reader(f) for row in reader: current_row = row headers = current_row[0:3] columns = current_row[3:] groups = list(zip (*[iter(columns)]*5)) for row in groups: writer.writerow(headers + list(row)) f.close() w.close() if __name__ == "__main__": input_files = ['turnstile_110528.txt', 'turnstile_110604.txt'] fix_turnstile_data(input_files)
mit
8,711,111,627,422,009,000
36.068966
86
0.625116
false
ONEcampaign/humanitarian-data-service
resources/data/raw/example/transform_scripts/parse_acled_all_africa.py
1
1542
import re import pandas as pd ACLED_FILE = 'ACLED-All-Africa-File_20170101-to-20170429.csv' def clean_and_save(): encoding_key = 'iso-8859-1' df = pd.read_csv(ACLED_FILE, encoding=encoding_key) print df.head() print df.columns print df.describe() cleaned_file = 'cleaned_{}'.format(ACLED_FILE) df.to_csv(cleaned_file, encoding='utf-8', index=False) return df, cleaned_file # From a clean file, attempt to derive the country name def derive_cols(cleaned_file): df = pd.read_csv(cleaned_file, encoding='utf-8') print df.head() # string patterns to search for _digits = re.compile('\d') _parens = re.compile(r'^(.*?)(?: \((.*)\))?$') def extract_country(actor): country = None results = re.findall(_parens, actor) if results: descript, country = results[0] if bool(_digits.search(country)): # here it's probably a year, not a country # try to get last word of first string as proxy for region country = descript.split()[-1] return country.strip() df['extracted_country_or_region'] = df['ACTOR1'].apply(extract_country) print df.head() derived_file = 'derived_{}'.format(ACLED_FILE) df.to_csv(derived_file, encoding='utf-8', index=False) return df, derived_file def run(): print 'Transforming ACLED data...' cleaned_df, cleaned_file = clean_and_save() derived_df, derived_file = derive_cols(cleaned_file) print 'Done!' run()
mit
-3,997,759,721,461,324,300
28.653846
75
0.616732
false
mortentoo/tools
pipeline/prman_jobTime.py
1
1815
''' Sums the total elapsed time of all rendered images of a Pixar Renderman batch job. It reads the data from the job XML files, so these are required! Run in command line: python prman_jobTime.py /path/to/jobFolder ''' import os, sys import xml.etree.ElementTree as ET args = sys.argv[1:] def readRenderTime(file): xml = ET.parse(file) root = xml.getroot() timers = root.findall(".//*[@name='totaltime']/elapsed") for timer in timers: return float(timer.text) def updateProgress(count, total): p = count / float(total) * 100 sys.stdout.write('\r') sys.stdout.write("processing files: %d%%" % (p)) sys.stdout.flush() def process(): if len(args) != 1: print("You need to specify path as argument!") else: total = 0 xmlFiles = [] rendertimes = [] for root, dirs, files in os.walk(args[0]): for file in files: if file.endswith(".xml"): xmlFiles.append(os.path.join(root, file)) total += 1 for i, path in enumerate(xmlFiles): rendertimes.append(readRenderTime(path)) updateProgress(i + 1, total) if len(rendertimes) == 0: print("No data found!") return # header print("\n\n%15s %10s %15s" % ("", "h.mm.ss", "seconds")) # divider print("------------------------------------------") # total in h:mm:ss format secondsTotal = sum(rendertimes) m, s = divmod(secondsTotal, 60) h, m = divmod(m, 60) print("%-15s %4d:%02d:%02d %15.3f" % ("job:", h, m, s, secondsTotal)) # average in h:mm:ss format secondsAvg = secondsTotal / len(rendertimes) m, s = divmod(secondsAvg, 60) h, m = divmod(m, 60) print("%-15s %4d:%02d:%02d %15.3f" % ("average:", h, m, s, secondsAvg)) # divider print("------------------------------------------") # number of images print("total images: %d\n" % (len(rendertimes))) process()
gpl-3.0
-5,149,303,020,670,490,000
22.571429
83
0.608815
false
tboyce1/home-assistant
homeassistant/components/cover/tahoma.py
2
2534
""" Support for Tahoma cover - shutters etc. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/cover.tahoma/ """ import logging from datetime import timedelta from homeassistant.components.cover import CoverDevice from homeassistant.components.tahoma import ( DOMAIN as TAHOMA_DOMAIN, TahomaDevice) DEPENDENCIES = ['tahoma'] _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = timedelta(seconds=60) def setup_platform(hass, config, add_devices, discovery_info=None): """Set up Tahoma covers.""" controller = hass.data[TAHOMA_DOMAIN]['controller'] devices = [] for device in hass.data[TAHOMA_DOMAIN]['devices']['cover']: devices.append(TahomaCover(device, controller)) add_devices(devices, True) class TahomaCover(TahomaDevice, CoverDevice): """Representation a Tahoma Cover.""" def update(self): """Update method.""" self.controller.get_states([self.tahoma_device]) @property def current_cover_position(self): """ Return current position of cover. 0 is closed, 100 is fully open. """ try: position = 100 - \ self.tahoma_device.active_states['core:ClosureState'] if position <= 5: return 0 if position >= 95: return 100 return position except KeyError: return None def set_cover_position(self, position, **kwargs): """Move the cover to a specific position.""" self.apply_action('setPosition', 100 - position) @property def is_closed(self): """Return if the cover is closed.""" if self.current_cover_position is not None: return self.current_cover_position == 0 @property def device_class(self): """Return the class of the device.""" if self.tahoma_device.type == 'io:WindowOpenerVeluxIOComponent': return 'window' else: return None def open_cover(self, **kwargs): """Open the cover.""" self.apply_action('open') def close_cover(self, **kwargs): """Close the cover.""" self.apply_action('close') def stop_cover(self, **kwargs): """Stop the cover.""" if self.tahoma_device.type == \ 'io:RollerShutterWithLowSpeedManagementIOComponent': self.apply_action('setPosition', 'secured') else: self.apply_action('stopIdentify')
apache-2.0
1,365,016,466,210,322,000
28.126437
74
0.617206
false
jamespcole/home-assistant
homeassistant/components/amazon_polly/tts.py
1
6758
""" Support for the Amazon Polly text to speech service. For more details about this component, please refer to the documentation at https://home-assistant.io/components/tts.amazon_polly/ """ import logging import voluptuous as vol from homeassistant.components.tts import PLATFORM_SCHEMA, Provider import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['boto3==1.9.16'] _LOGGER = logging.getLogger(__name__) CONF_REGION = 'region_name' CONF_ACCESS_KEY_ID = 'aws_access_key_id' CONF_SECRET_ACCESS_KEY = 'aws_secret_access_key' CONF_PROFILE_NAME = 'profile_name' ATTR_CREDENTIALS = 'credentials' DEFAULT_REGION = 'us-east-1' SUPPORTED_REGIONS = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'ca-central-1', 'eu-west-1', 'eu-central-1', 'eu-west-2', 'eu-west-3', 'ap-southeast-1', 'ap-southeast-2', 'ap-northeast-2', 'ap-northeast-1', 'ap-south-1', 'sa-east-1'] CONF_VOICE = 'voice' CONF_OUTPUT_FORMAT = 'output_format' CONF_SAMPLE_RATE = 'sample_rate' CONF_TEXT_TYPE = 'text_type' SUPPORTED_VOICES = [ 'Zhiyu', # Chinese 'Mads', 'Naja', # Danish 'Ruben', 'Lotte', # Dutch 'Russell', 'Nicole', # English Austrailian 'Brian', 'Amy', 'Emma', # English 'Aditi', 'Raveena', # English, Indian 'Joey', 'Justin', 'Matthew', 'Ivy', 'Joanna', 'Kendra', 'Kimberly', 'Salli', # English 'Geraint', # English Welsh 'Mathieu', 'Celine', 'Lea', # French 'Chantal', # French Canadian 'Hans', 'Marlene', 'Vicki', # German 'Aditi', # Hindi 'Karl', 'Dora', # Icelandic 'Giorgio', 'Carla', 'Bianca', # Italian 'Takumi', 'Mizuki', # Japanese 'Seoyeon', # Korean 'Liv', # Norwegian 'Jacek', 'Jan', 'Ewa', 'Maja', # Polish 'Ricardo', 'Vitoria', # Portuguese, Brazilian 'Cristiano', 'Ines', # Portuguese, European 'Carmen', # Romanian 'Maxim', 'Tatyana', # Russian 'Enrique', 'Conchita', 'Lucia', # Spanish European 'Mia', # Spanish Mexican 'Miguel', 'Penelope', # Spanish US 'Astrid', # Swedish 'Filiz', # Turkish 'Gwyneth', # Welsh ] SUPPORTED_OUTPUT_FORMATS = ['mp3', 'ogg_vorbis', 'pcm'] SUPPORTED_SAMPLE_RATES = ['8000', '16000', '22050'] SUPPORTED_SAMPLE_RATES_MAP = { 'mp3': ['8000', '16000', '22050'], 'ogg_vorbis': ['8000', '16000', '22050'], 'pcm': ['8000', '16000'], } SUPPORTED_TEXT_TYPES = ['text', 'ssml'] CONTENT_TYPE_EXTENSIONS = { 'audio/mpeg': 'mp3', 'audio/ogg': 'ogg', 'audio/pcm': 'pcm', } DEFAULT_VOICE = 'Joanna' DEFAULT_OUTPUT_FORMAT = 'mp3' DEFAULT_TEXT_TYPE = 'text' DEFAULT_SAMPLE_RATES = { 'mp3': '22050', 'ogg_vorbis': '22050', 'pcm': '16000', } PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_REGION, default=DEFAULT_REGION): vol.In(SUPPORTED_REGIONS), vol.Inclusive(CONF_ACCESS_KEY_ID, ATTR_CREDENTIALS): cv.string, vol.Inclusive(CONF_SECRET_ACCESS_KEY, ATTR_CREDENTIALS): cv.string, vol.Exclusive(CONF_PROFILE_NAME, ATTR_CREDENTIALS): cv.string, vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): vol.In(SUPPORTED_VOICES), vol.Optional(CONF_OUTPUT_FORMAT, default=DEFAULT_OUTPUT_FORMAT): vol.In(SUPPORTED_OUTPUT_FORMATS), vol.Optional(CONF_SAMPLE_RATE): vol.All(cv.string, vol.In(SUPPORTED_SAMPLE_RATES)), vol.Optional(CONF_TEXT_TYPE, default=DEFAULT_TEXT_TYPE): vol.In(SUPPORTED_TEXT_TYPES), }) def get_engine(hass, config): """Set up Amazon Polly speech component.""" output_format = config.get(CONF_OUTPUT_FORMAT) sample_rate = config.get( CONF_SAMPLE_RATE, DEFAULT_SAMPLE_RATES[output_format]) if sample_rate not in SUPPORTED_SAMPLE_RATES_MAP.get(output_format): _LOGGER.error("%s is not a valid sample rate for %s", sample_rate, output_format) return None config[CONF_SAMPLE_RATE] = sample_rate import boto3 
profile = config.get(CONF_PROFILE_NAME) if profile is not None: boto3.setup_default_session(profile_name=profile) aws_config = { CONF_REGION: config.get(CONF_REGION), CONF_ACCESS_KEY_ID: config.get(CONF_ACCESS_KEY_ID), CONF_SECRET_ACCESS_KEY: config.get(CONF_SECRET_ACCESS_KEY), } del config[CONF_REGION] del config[CONF_ACCESS_KEY_ID] del config[CONF_SECRET_ACCESS_KEY] polly_client = boto3.client('polly', **aws_config) supported_languages = [] all_voices = {} all_voices_req = polly_client.describe_voices() for voice in all_voices_req.get('Voices'): all_voices[voice.get('Id')] = voice if voice.get('LanguageCode') not in supported_languages: supported_languages.append(voice.get('LanguageCode')) return AmazonPollyProvider( polly_client, config, supported_languages, all_voices) class AmazonPollyProvider(Provider): """Amazon Polly speech api provider.""" def __init__(self, polly_client, config, supported_languages, all_voices): """Initialize Amazon Polly provider for TTS.""" self.client = polly_client self.config = config self.supported_langs = supported_languages self.all_voices = all_voices self.default_voice = self.config.get(CONF_VOICE) self.name = 'Amazon Polly' @property def supported_languages(self): """Return a list of supported languages.""" return self.supported_langs @property def default_language(self): """Return the default language.""" return self.all_voices.get(self.default_voice).get('LanguageCode') @property def default_options(self): """Return dict include default options.""" return {CONF_VOICE: self.default_voice} @property def supported_options(self): """Return a list of supported options.""" return [CONF_VOICE] def get_tts_audio(self, message, language=None, options=None): """Request TTS file from Polly.""" voice_id = options.get(CONF_VOICE, self.default_voice) voice_in_dict = self.all_voices.get(voice_id) if language != voice_in_dict.get('LanguageCode'): _LOGGER.error("%s does not support the %s language", voice_id, language) return None, None resp = self.client.synthesize_speech( OutputFormat=self.config[CONF_OUTPUT_FORMAT], SampleRate=self.config[CONF_SAMPLE_RATE], Text=message, TextType=self.config[CONF_TEXT_TYPE], VoiceId=voice_id ) return (CONTENT_TYPE_EXTENSIONS[resp.get('ContentType')], resp.get('AudioStream').read())
apache-2.0
5,488,094,041,616,760,000
31.334928
78
0.625925
false
cristianav/PyDEX
share/lib/pydex/pydex_dialogs.py
1
15248
# -*- coding: utf-8 -*- # # pydex_dialogs.py # # Copyright Cristian Navalici ncristian [at] lemonsoftware.eu # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. import Queue from threading import Thread from urllib import urlencode from urllib2 import HTTPError, Request, URLError, urlopen import wx import wx.grid as gridlib TextEventType = wx.NewEventType() EVT_THREAD_TEXT_EVENT = wx.PyEventBinder(TextEventType, 1) import controller # ------------------------------------------------------------------------------------------------- # DB UPDATE DIALOG # ------------------------------------------------------------------------------------------------- class DbUpdateDlg(wx.Dialog): """update the database""" def __init__(self, parent=None): wx.Dialog.__init__(self, parent, title="Actualizează baza de date", size=(500, 200)) ##self.DBob = dbutils.DBUtils() self.Bind(EVT_THREAD_TEXT_EVENT, self.__update_txt_message) # get last timestamp to check if a major update is needed self.cob = controller.Controller(self) lastupdate = self.cob.get_db_last_update() if not lastupdate: text = "PyDEX a detectat ca baza de date nu a fost niciodata actualizată. Un update major este necesar.\n" text += "În funcţie de performanţele computerului, acest lucru va dura câteva minute bune. " text += "Dacă nu doriţi actualizare directă, descărcaţi baza de date deja existentă " text += "de pe siteul oficial (urmăriţi manualul pt detalii). \n" text += "Continuaţi cu actualizare?" 
else: text = "Ultimul update: %s" % lastupdate panel = wx.Panel(self, -1) vbox = wx.BoxSizer(wx.VERTICAL) hbox1 = wx.BoxSizer(wx.HORIZONTAL) self.txt_updatemsg = wx.StaticText(panel, -1, label=text) hbox1.Add(self.txt_updatemsg, -1, wx.LEFT, 8) vbox.Add(hbox1, 1, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 10) hbox2 = wx.BoxSizer(wx.HORIZONTAL) self.gauge = wx.Gauge(panel, -1, style=wx.GA_HORIZONTAL | wx.GA_SMOOTH) hbox2.Add(self.gauge, -1, wx.LEFT) vbox.Add(hbox2, 1, wx.EXPAND | wx.TOP, 50) hbox3 = wx.BoxSizer(wx.HORIZONTAL) self.btn_update = wx.Button(panel, id=wx.NewId(), label="Actualizează") self.btn_cancel = wx.Button(panel, id=wx.ID_CANCEL) self.Bind(wx.EVT_BUTTON, self.do_update, self.btn_update) self.Bind(wx.EVT_BUTTON, self.on_quit, self.btn_cancel) hbox3.Add(self.btn_update, 0, wx.RIGHT) hbox3.Add(self.btn_cancel, 0, wx.RIGHT) vbox.Add(hbox3, 0, wx.EXPAND | wx.ALL, 10) panel.SetSizer(vbox) def do_update(self, e): self.txt_updatemsg.SetLabel("Nu întrerupeţi această operaţie \n") self.btn_update.Disable() self.btn_cancel.Disable() try: thread = Thread(target=self.start_update_thread) thread.start() #thread.join() # don't do it while thread.is_alive(): wx.Yield() wx.MilliSleep(200) self.gauge.Pulse() else: self.end_update_thread() except Exception, e: print "do_update", e def start_update_thread(self): self.result_update = self.cob.do_db_update(self) #wx.CallAfter(self.end_update_thread()) def end_update_thread(self): try: if self.result_update: msg_str = "Actualizarea a luat sfârşit." else: msg_str = "Actualizarea a eşuat!" wx.CallAfter(self.txt_updatemsg.SetLabel, msg_str) self.btn_cancel.Enable() self.gauge.Hide() except Exception, e: print "end_update_thread", e def __update_txt_message(self, evt): self.txt_updatemsg.SetLabel(evt.getText()) def on_quit(self, e): self.Destroy() # ------------------------------------------------------------------------------------------------- # SETTINGS DIALOG # ------------------------------------------------------------------------------------------------- class SettingsDlg(wx.Dialog): """settings dialog""" def __init__(self, parent): wx.Dialog.__init__(self, parent, title="Setări", size=(600, 400)) panel = wx.Panel(self, -1) vbox = wx.BoxSizer(wx.VERTICAL) hbox1 = wx.BoxSizer(wx.HORIZONTAL) st_credentials = wx.StaticText(panel, -1, 'Date de conectare pe dexonline.ro (creaţi-vă mai întâi un cont pe site)') hbox1.Add(st_credentials, 1, wx.LEFT, 8) vbox.Add(hbox1, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 10) hbox2 = wx.BoxSizer(wx.HORIZONTAL) st_username = wx.StaticText(panel, 0, 'Utilizator:') txt_username = wx.TextCtrl(panel, size=(120, -1)) st_password = wx.StaticText(panel, 0, 'Parolă:') txt_password = wx.TextCtrl(panel, size=(120, -1), style=wx.TE_PASSWORD) btn_check_cred = wx.Button(panel, id=wx.NewId(), label="Verifică datele") self.Bind(wx.EVT_BUTTON, self.check_credentials_onsite, btn_check_cred) hbox2.Add(st_username, 0, wx.LEFT, 8) hbox2.Add(txt_username, 0, wx.LEFT, 8) hbox2.Add(st_password, 0, wx.LEFT, 8) hbox2.Add(txt_password, 0, wx.LEFT, 8) hbox2.Add(btn_check_cred, 0, wx.LEFT, 8) vbox.Add(hbox2, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 10) hbox3 = wx.BoxSizer(wx.HORIZONTAL) st_result_credential = wx.StaticText(panel, wx.ID_ANY, label="", style=wx.ALIGN_CENTER) hbox3.Add(st_result_credential, 1, wx.LEFT) vbox.Add(hbox3, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 10) panel.SetSizer(vbox) # use these globally self.txt_username = txt_username self.txt_password = txt_password self.result_credential = st_result_credential def 
check_credentials_onsite(self, ev): """checks the entered credentials on dexonline.ro website""" try: req = Request("http://dexonline.ro/login") data = urlencode([('password', self.txt_password.GetValue()), ('email', self.txt_username.GetValue()), ('login', 'Conectare')]) response = urlopen(req, data) #print response.geturl() # debug #print response.info() # debug login = response.read().find('flashMessage errorType') # look for this string in the page, might be changed over time! if login != -1: self.result_credential.SetLabel("Logarea a eşuat. Verificaţi datele de access.") else: self.result_credential.SetLabel("Logarea s-a realizat cu success.") except HTTPError, e: dlg = wx.MessageDialog(self, 'Serverul nu a putut îndeplini cererea. Cod eroare: %s' % e.code, "Eroare", wx.OK | wx.ICON_WARNING) dlg.ShowModal() except URLError, e: dlg = wx.MessageDialog(self, 'Nu reuşim să contactăm serverul. Motiv: %s' % e.reason, "Eroare", wx.OK | wx.ICON_WARNING) dlg.ShowModal() except Exception, e: dlg = wx.MessageDialog(self, 'Eroare: %s' % e, "Eroare", wx.OK | wx.ICON_WARNING) dlg.ShowModal() # ------------------------------------------------------------------------------------------------- # CUSTOM EVENT - used to communicate from Thread to Main GUI Window # ------------------------------------------------------------------------------------------------- class NewTextEvent(wx.PyCommandEvent): def __init__(self, evtType, id): wx.PyCommandEvent.__init__(self, evtType, id) self.msg = '' def setText(self, text): self.msg = text def getText(self): return self.msg # ------------------------------------------------------------------------------------------------- # SOURCES (ACRONIMS) DIALOG # ------------------------------------------------------------------------------------------------- class SourcesDlg(wx.Dialog): """acronims dialog""" def __init__(self, parent, sources): wx.Dialog.__init__(self, parent, title="Acronime", size=(1000, 800), style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER) panel = wx.Panel(self, -1, style=0) grid = SourcesTableGrid(panel, sources) bs = wx.BoxSizer(wx.VERTICAL) bs.Add(grid, 1, wx.GROW|wx.ALL, 5) panel.SetSizer(bs) def on_quit(self, e): self.Destroy() class SourcesTableGrid(gridlib.Grid): def __init__(self, parent, tbldata): gridlib.Grid.__init__(self, parent, -1) table = SourcesTable(tbldata) # The second parameter means that the grid is to take ownership of the # table and will destroy it when done. Otherwise you would need to keep # a reference to it and call it's Destroy method later. self.SetTable(table, True) self.SetRowLabelSize(0) self.SetMargins(0,0) self.AutoSizeColumns(False) self.EnableEditing(False) class SourcesTable(gridlib.PyGridTableBase): def __init__(self, tbldata): gridlib.PyGridTableBase.__init__(self) self.colLabels = ['Id', 'Acronim', 'Definiţie', 'Autor', 'Editura', 'An'] self.dataTypes = [ gridlib.GRID_VALUE_NUMBER, gridlib.GRID_VALUE_STRING, gridlib.GRID_VALUE_STRING, gridlib.GRID_VALUE_STRING, gridlib.GRID_VALUE_NUMBER ] self.data = tbldata #-------------------------------------------------- # required methods for the wx.PyGridTableBase interface def GetNumberRows(self): return len(self.data) + 1 def GetNumberCols(self): return len(self.data[0]) def IsEmptyCell(self, row, col): try: return not self.data[row][col] except IndexError: return True # Get/Set values in the table. The Python version of these # methods can handle any data-type, (as long as the Editor and # Renderer understands the type too,) not just strings as in the # C++ version. 
def GetValue(self, row, col): try: return self.data[row][col] except IndexError: return '' def SetValue(self, row, col, value): def innerSetValue(row, col, value): try: self.data[row][col] = value except IndexError: # add a new row self.data.append([''] * self.GetNumberCols()) innerSetValue(row, col, value) # tell the grid we've added a row msg = gridlib.GridTableMessage(self, # The table gridlib.GRIDTABLE_NOTIFY_ROWS_APPENDED, # what we did to it 1 # how many ) self.GetView().ProcessTableMessage(msg) innerSetValue(row, col, value) # OPTIONALS METHODS # Called when the grid needs to display labels def GetColLabelValue(self, col): return self.colLabels[col] # ------------------------------------------------------------------------------------------------- # ABBREVIATIONS DIALOG # ------------------------------------------------------------------------------------------------- class AbbrevsDlg(wx.Dialog): '''abbreviations dialog''' def __init__(self, parent, abbrevs): wx.Dialog.__init__(self, parent, title="Abrevieri", size=(1000, 800), style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER) panel = wx.Panel(self, -1, style=0) grid = AbbrevsTableGrid(panel, abbrevs) bs = wx.BoxSizer(wx.VERTICAL) bs.Add(grid, 1, wx.GROW|wx.ALL, 5) panel.SetSizer(bs) def on_quit(self, e): self.Destroy() class AbbrevsTableGrid(gridlib.Grid): def __init__(self, parent, tbldata): gridlib.Grid.__init__(self, parent, -1) table = AbrevsTable(tbldata) # The second parameter means that the grid is to take ownership of the # table and will destroy it when done. Otherwise you would need to keep # a reference to it and call it's Destroy method later. self.SetTable(table, True) self.SetRowLabelSize(0) self.SetMargins(0,0) self.AutoSizeColumns(True) self.EnableEditing(False) class AbrevsTable(gridlib.PyGridTableBase): def __init__(self, tbldata): gridlib.PyGridTableBase.__init__(self) self.colLabels = ['Numele secţiunii', 'Abreviere scurtă', 'Abreviere', 'Ambiguă?'] self.dataTypes = [ gridlib.GRID_VALUE_STRING, gridlib.GRID_VALUE_STRING, gridlib.GRID_VALUE_STRING, gridlib.GRID_VALUE_NUMBER ] self.data = tbldata #-------------------------------------------------- # required methods for the wx.PyGridTableBase interface def GetNumberRows(self): return len(self.data) + 1 def GetNumberCols(self): return len(self.data[0]) def IsEmptyCell(self, row, col): try: return not self.data[row][col] except IndexError: return True # Get/Set values in the table. The Python version of these # methods can handle any data-type, (as long as the Editor and # Renderer understands the type too,) not just strings as in the # C++ version. def GetValue(self, row, col): try: return self.data[row][col] except IndexError: return '' def SetValue(self, row, col, value): def innerSetValue(row, col, value): try: self.data[row][col] = value except IndexError: # add a new row self.data.append([''] * self.GetNumberCols()) innerSetValue(row, col, value) # tell the grid we've added a row msg = gridlib.GridTableMessage(self, # The table gridlib.GRIDTABLE_NOTIFY_ROWS_APPENDED, # what we did to it 1 # how many ) self.GetView().ProcessTableMessage(msg) innerSetValue(row, col, value) # OPTIONALS METHODS # Called when the grid needs to display labels def GetColLabelValue(self, col): return self.colLabels[col] #EOF
gpl-3.0
8,644,906,239,336,999,000
35.295943
141
0.555957
false
felipenaselva/felipe.repository
script.module.resolveurl/lib/resolveurl/plugins/lib/helpers.py
1
9811
""" ResolveURL Addon for Kodi Copyright (C) 2016 t0mm0, tknorris This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import re import urllib import xbmcgui import jsunpack from urlparse import urlparse from resolveurl import common from resolveurl.resolver import ResolverError def get_hidden(html, form_id=None, index=None, include_submit=True): hidden = {} if form_id: pattern = '''<form [^>]*(?:id|name)\s*=\s*['"]?%s['"]?[^>]*>(.*?)</form>''' % (form_id) else: pattern = '''<form[^>]*>(.*?)</form>''' html = cleanse_html(html) for i, form in enumerate(re.finditer(pattern, html, re.DOTALL | re.I)): common.logger.log(form.group(1)) if index is None or i == index: for field in re.finditer('''<input [^>]*type=['"]?hidden['"]?[^>]*>''', form.group(1)): match = re.search('''name\s*=\s*['"]([^'"]+)''', field.group(0)) match1 = re.search('''value\s*=\s*['"]([^'"]*)''', field.group(0)) if match and match1: hidden[match.group(1)] = match1.group(1) if include_submit: match = re.search('''<input [^>]*type=['"]?submit['"]?[^>]*>''', form.group(1)) if match: name = re.search('''name\s*=\s*['"]([^'"]+)''', match.group(0)) value = re.search('''value\s*=\s*['"]([^'"]*)''', match.group(0)) if name and value: hidden[name.group(1)] = value.group(1) common.logger.log_debug('Hidden fields are: %s' % (hidden)) return hidden def pick_source(sources, auto_pick=None): if auto_pick is None: auto_pick = common.get_setting('auto_pick') == 'true' if len(sources) == 1: return sources[0][1] elif len(sources) > 1: if auto_pick: return sources[0][1] else: result = xbmcgui.Dialog().select(common.i18n('choose_the_link'), [str(source[0]) if source[0] else 'Unknown' for source in sources]) if result == -1: raise ResolverError(common.i18n('no_link_selected')) else: return sources[result][1] else: raise ResolverError(common.i18n('no_video_link')) def append_headers(headers): return '|%s' % '&'.join(['%s=%s' % (key, urllib.quote_plus(headers[key])) for key in headers]) def get_packed_data(html): packed_data = '' for match in re.finditer('(eval\s*\(function.*?)</script>', html, re.DOTALL | re.I): try: js_data = jsunpack.unpack(match.group(1)) js_data = js_data.replace('\\', '') packed_data += js_data except: pass return packed_data def parse_sources_list(html): sources = [] match = re.search('''['"]?sources['"]?\s*:\s*\[(.*?)\]''', html, re.DOTALL) if match: sources = [(match[1], match[0].replace('\/', '/')) for match in re.findall('''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''', match.group(1), re.DOTALL)] return sources def parse_html5_source_list(html): label_attrib = 'type' if not re.search('''<source\s+src\s*=.*?data-res\s*=.*?/\s*>''', html) else 'data-res' sources = [(match[1], match[0].replace('\/', '/')) for match in re.findall('''<source\s+src\s*=\s*['"]([^'"]+)['"](?:.*?''' + label_attrib + '''\s*=\s*['"](?:video/)?([^'"]+)['"])''', html, re.DOTALL)] return sources def parse_smil_source_list(smil): sources = [] base = 
re.search('base\s*=\s*"([^"]+)', smil).groups()[0] for i in re.finditer('src\s*=\s*"([^"]+)(?:"\s*(?:width|height)\s*=\s*"([^"]+))?', smil): label = 'Unknown' if (len(i.groups()) > 1) and (i.group(2) is not None): label = i.group(2) sources += [(label, '%s playpath=%s' % (base, i.group(1)))] return sources def scrape_sources(html, result_blacklist=None, scheme='http', patterns=None, generic_patterns=True): if patterns is None: patterns = [] def __parse_to_list(_html, regex): _blacklist = ['.jpg', '.jpeg', '.gif', '.png', '.js', '.css', '.htm', '.html', '.php', '.srt', '.sub', '.xml', '.swf', '.vtt', '.mpd'] _blacklist = set(_blacklist + result_blacklist) streams = [] labels = [] for r in re.finditer(regex, _html, re.DOTALL): match = r.groupdict() stream_url = match['url'].replace('&amp;', '&') file_name = urlparse(stream_url[:-1]).path.split('/')[-1] if stream_url.endswith("/") else urlparse(stream_url).path.split('/')[-1] blocked = not file_name or any(item in file_name.lower() for item in _blacklist) if stream_url.startswith('//'): stream_url = scheme + ':' + stream_url if '://' not in stream_url or blocked or (stream_url in streams) or any(stream_url == t[1] for t in source_list): continue label = match.get('label', file_name) if label is None: label = file_name labels.append(label) streams.append(stream_url) matches = zip(labels, streams) if matches: common.logger.log_debug('Scrape sources |%s| found |%s|' % (regex, matches)) return matches if result_blacklist is None: result_blacklist = [] elif isinstance(result_blacklist, str): result_blacklist = [result_blacklist] html = html.replace("\/", "/") html += get_packed_data(html) source_list = [] if generic_patterns or not patterns: source_list += __parse_to_list(html, '''["']?label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)''') source_list += __parse_to_list(html, '''["']?\s*(?:file|src)\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''') source_list += __parse_to_list(html, '''video[^><]+src\s*[=:]\s*['"](?P<url>[^'"]+)''') source_list += __parse_to_list(html, '''source\s+src\s*=\s*['"](?P<url>[^'"]+)['"](?:.*?res\s*=\s*['"](?P<label>[^'"]+))?''') source_list += __parse_to_list(html, '''["'](?:file|url)["']\s*[:=]\s*["'](?P<url>[^"']+)''') source_list += __parse_to_list(html, '''param\s+name\s*=\s*"src"\s*value\s*=\s*"(?P<url>[^"]+)''') for regex in patterns: source_list += __parse_to_list(html, regex) source_list = list(set(source_list)) common.logger.log(source_list) if len(source_list) > 1: try: source_list.sort(key=lambda x: int(re.sub("\D", "", x[0])), reverse=True) except: common.logger.log_debug('Scrape sources sort failed |int(re.sub("\D", "", x[0])|') try: source_list.sort(key=lambda x: re.sub("[^a-zA-Z]", "", x[0])) except: common.logger.log_debug('Scrape sources sort failed |re.sub("[^a-zA-Z]", "", x[0])|') return source_list def get_media_url(url, result_blacklist=None, patterns=None, generic_patterns=True): if patterns is None: patterns = [] scheme = urlparse(url).scheme if result_blacklist is None: result_blacklist = [] elif isinstance(result_blacklist, str): result_blacklist = [result_blacklist] result_blacklist = list(set(result_blacklist + ['.smil'])) # smil(not playable) contains potential sources, only blacklist when called from here net = common.Net() headers = {'User-Agent': common.RAND_UA} response = net.http_GET(url, headers=headers) response_headers = response.get_headers(as_dict=True) 
headers.update({'Referer': url}) cookie = response_headers.get('Set-Cookie', None) if cookie: headers.update({'Cookie': cookie}) html = response.content source_list = scrape_sources(html, result_blacklist, scheme, patterns, generic_patterns) source = pick_source(source_list) return source + append_headers(headers) def cleanse_html(html): for match in re.finditer('<!--(.*?)-->', html, re.DOTALL): if match.group(1)[-2:] != '//': html = html.replace(match.group(0), '') html = re.sub('''<(div|span)[^>]+style=["'](visibility:\s*hidden|display:\s*none);?["']>.*?</\\1>''', '', html, re.I | re.DOTALL) return html def get_dom(html, tag): start_str = '<%s' % (tag.lower()) end_str = '</%s' % (tag.lower()) results = [] html = html.lower() while html: start = html.find(start_str) end = html.find(end_str, start) pos = html.find(start_str, start + 1) while pos < end and pos != -1: tend = html.find(end_str, end + len(end_str)) if tend != -1: end = tend pos = html.find(start_str, pos + 1) if start == -1 and end == -1: break elif start > -1 and end > -1: result = html[start:end] elif end > -1: result = html[:end] elif start > -1: result = html[start:] else: break results.append(result) html = html[start + len(start_str):] return results
gpl-2.0
2,584,517,304,195,607,000
41.656522
205
0.537458
false
cgrebeld/pymel
pymel/core/windows.py
1
30564
""" Functions for creating UI elements, as well as their class counterparts. """ import re, sys, functools, traceback import pymel.util as _util import pymel.internal.pmcmds as cmds import pymel.internal.factories as _factories import pymel.internal as _internal import pymel.versions as _versions from language import mel, melGlobals from system import Path as _Path import uitypes as _uitypes if _versions.current() >= _versions.v2011: from uitypes import toQtObject, toQtLayout, toQtControl, toQtMenuItem, toQtWindow _logger = _internal.getLogger(__name__) _thisModule = sys.modules[__name__] # Note - don't do # __import__('pymel.core.windows').XXX # ...as this will get the 'original' module, not the dynamic one! # Do: # import pymel.core.windows; import sys; sys.modules[pymel.core.windows].XXX # instead! thisModuleCmd = "import %s; import sys; sys.modules[%r]" % (__name__, __name__) #----------------------------------------------- # Enhanced UI Commands #----------------------------------------------- def _lsUI( **kwargs ): long = kwargs.pop( 'long', kwargs.pop( 'l', True ) ) head = kwargs.pop( 'head', kwargs.pop( 'hd', None ) ) tail = kwargs.pop( 'tail', kwargs.pop( 'tl', None) ) if not kwargs: kwargs = { 'windows': 1, 'panels' : 1, 'editors' : 1, 'controls' : 1, 'controlLayouts' : 1, 'collection' : 1, 'radioMenuItemCollections' : 1, 'menus' : 1, 'menuItems' : 1, 'contexts' : 0, 'cmdTemplates' : 1 } kwargs['long'] = long if head is not None: kwargs['head'] = head if tail is not None: kwargs['tail'] = tail return _util.listForNone(cmds.lsUI(**kwargs)) # all optionMenus are popupMenus, but not all popupMenus are optionMenus _commandsToUITypes = { 'optionMenu':'popupMenu', } def _findLongName(name, type=None): # this remap is currently for OptionMenu, but the fix only works in 2011 # lsUI won't list popupMenus or optionMenus kwargs = { 'long' : True} if type: kwargs['type'] = _commandsToUITypes.get(type, type) uiObjs = _util.listForNone(_lsUI( **kwargs )) res = [ x for x in uiObjs if x.endswith( '|' + name) ] if len(res) > 1: raise ValueError, "found more than one UI element matching the name %s" % name elif len(res) == 0: raise ValueError, "could not find a UI element matching the name %s" % name return res[0] def lsUI( **kwargs ): """ Modified: - long defaults to True - if no type is passed, defaults to all known types """ return [ _uitypes.PyUI(x) for x in _lsUI( **kwargs ) ] scriptTableCmds = {} def scriptTable(*args, **kwargs): """ Maya Bug Fix: - fixed getCellCmd to work with python functions, previously only worked with mel callbacks IMPORTANT: you cannot use the print statement within the getCellCmd callback function or your values will not be returned to the table """ cb = kwargs.pop('getCellCmd', kwargs.pop('gcc',None) ) cc = kwargs.pop('cellChangedCmd', kwargs.pop('ccc',None) ) uiName = cmds.scriptTable( *args, **kwargs ) if "q" in kwargs or "query" in kwargs: return uiName kwargs.clear() if cb: if hasattr(cb, '__call__'): procName = 'getCellMel%d' % len(scriptTableCmds.keys()) key = '%s_%s' % (uiName,procName) procCmd = """global proc string %s( int $row, int $column ) { return python(%s.scriptTableCmds['%s'](" + $row + "," + $column + ")");} """ % (procName, thisModuleCmd, key) mel.eval( procCmd ) scriptTableCmds[key] = cb # create a scriptJob to clean up the dictionary of functions cmds.scriptJob(uiDeleted=(uiName, lambda *x: scriptTableCmds.pop(key,None))) cb = procName kwargs['getCellCmd'] = cb if cc: if hasattr(cc, '__call__'): procName = 'cellChangedCmd%d' % 
len(scriptTableCmds.keys()) key = '%s_%s' % (uiName,procName) # Note - don't do # __import__('pymel.core.windows').XXX # ...as this will get the 'original' module, not the dynamic one! # Do: # import pymel.core.windows; import sys; sys.modules[pymel.core.windows].XXX # instead! procCmd = """global proc int %s( int $row, int $column, string $val) { return python("%s.scriptTableCmds['%s'](" + $row + "," + $column + ",'" + $val + "')");} """ % (procName, thisModuleCmd, key) mel.eval( procCmd ) scriptTableCmds[key] = cc # create a scriptJob to clean up the dictionary of functions cmds.scriptJob(uiDeleted=(uiName, lambda *x: scriptTableCmds.pop(key,None))) cc = procName kwargs['cellChangedCmd'] = cc if kwargs: cmds.scriptTable( uiName, e=1, **kwargs) return _uitypes.ScriptTable(uiName) def getPanel(*args, **kwargs): typeOf = kwargs.pop('typeOf', kwargs.pop('to', None) ) if typeOf: # typeOf flag only allows short names kwargs['typeOf'] = typeOf.rsplit('|',1)[-1] return cmds.getPanel(*args, **kwargs ) # # #def textScrollList( *args, **kwargs ): # """ #Modifications: # - returns an empty list when the result is None for queries: selectIndexedItem, allItems, selectItem queries # """ # res = cmds.textScrollList(*args, **kwargs) # return _factories.listForNoneQuery( res, kwargs, [('selectIndexedItem', 'sii'), ('allItems', 'ai'), ('selectItem', 'si',)] ) # #def optionMenu( *args, **kwargs ): # """ #Modifications: # - returns an empty list when the result is None for queries: itemListLong, itemListShort queries # """ # res = cmds.optionMenu(*args, **kwargs) # return _factories.listForNoneQuery( res, kwargs, [('itemListLong', 'ill'), ('itemListShort', 'ils')] ) # #def optionMenuGrp( *args, **kwargs ): # """ #Modifications: # - returns an empty list when the result is None for queries: itemlistLong, itemListShort queries # """ # res = cmds.optionMenuGrp(*args, **kwargs) # return _factories.listForNoneQuery( res, kwargs, [('itemListLong', 'ill'), ('itemListShort', 'ils')] ) # #def modelEditor( *args, **kwargs ): # """ #Modifications: # - casts to PyNode for queries: camera # """ # res = cmds.modelEditor(*args, **kwargs) # if kwargs.get('query', kwargs.get('q')) and kwargs.get( 'camera', kwargs.get('cam')): # import general # return general.PyNode(res) # return res #=============================================================================== # Provides classes and functions to facilitate UI creation in Maya #=============================================================================== class BaseCallback(object): """ Base class for callbacks. """ def __init__(self,func,*args,**kwargs): self.func = func self.args = args self.kwargs = kwargs self.traceback = traceback.format_stack() if _versions.current() >= _versions.v2009: class Callback(BaseCallback): """ Enables deferred function evaluation with 'baked' arguments. Useful where lambdas won't work... It also ensures that the entire callback will be be represented by one undo entry. Example: .. 
python:: import pymel as pm def addRigger(rigger, **kwargs): print "adding rigger", rigger for rigger in riggers: pm.menuItem( label = "Add " + str(rigger), c = Callback(addRigger,rigger,p=1)) # will run: addRigger(rigger,p=1) """ def __call__(self,*args): cmds.undoInfo(openChunk=1) try: try: return self.func(*self.args, **self.kwargs) except Exception, e: raise _factories.CallbackError(self, e) finally: cmds.undoInfo(closeChunk=1) class CallbackWithArgs(Callback): def __call__(self,*args,**kwargs): # not sure when kwargs would get passed to __call__, # but best not to remove support now kwargsFinal = self.kwargs.copy() kwargsFinal.update(kwargs) cmds.undoInfo(openChunk=1) try: try: return self.func(*self.args + args, **kwargsFinal) except Exception, e: raise _factories.CallbackError(self, e) finally: cmds.undoInfo(closeChunk=1) else: class Callback(BaseCallback): """ Enables deferred function evaluation with 'baked' arguments. Useful where lambdas won't work... Example: .. python:: import pymel as pm def addRigger(rigger, **kwargs): print "adding rigger", rigger for rigger in riggers: pm.menuItem( label = "Add " + str(rigger), c = Callback(addRigger,rigger,p=1)) # will run: addRigger(rigger,p=1) """ # This implementation of the Callback object uses private members # to store static call information so that the call can be made through # a mel call, thus making the entire function call undoable _callData = None @staticmethod def _doCall(): (func, args, kwargs) = Callback._callData Callback._callData = func(*args, **kwargs) def __call__(self,*args): Callback._callData = (self.func, self.args, self.kwargs) try: mel.python("%s.Callback._doCall()" % thisModuleCmd) except Exception, e: raise _factories.CallbackError(self.func, e) return Callback._callData class CallbackWithArgs(Callback): def __call__(self,*args,**kwargs): kwargsFinal = self.kwargs.copy() kwargsFinal.update(kwargs) Callback._callData = (self.func, self.args + args, kwargsFinal) try: mel.python("%s.Callback._doCall()" % thisModuleCmd) except Exception, e: raise _factories.CallbackError(self.func, e) return Callback._callData def verticalLayout(*args, **kwargs): kwargs['orientation'] = 'vertical' return autoLayout(*args, **kwargs) def horizontalLayout(*args, **kwargs): kwargs['orientation'] = 'horizontal' return autoLayout(*args, **kwargs) def promptBox(title, message, okText, cancelText, **kwargs): """ Prompt for a value. Returns the string value or None if cancelled """ ret = promptDialog(t=title, m=message, b=[okText,cancelText], db=okText, cb=cancelText,**kwargs) if ret==okText: return promptDialog(q=1,tx=1) def promptBoxGenerator(*args, **kwargs): """ Keep prompting for values until cancelled """ while 1: ret = promptBox(*args, **kwargs) if not ret: return yield ret def confirmBox(title, message, yes="Yes", no="No", *moreButtons, **kwargs): """ Prompt for confirmation. Returns True/False, unless 'moreButtons' were specified, and then returns the button pressed""" default = kwargs.get("db", kwargs.get("defaultButton")) or yes ret = confirmDialog(t=title, m=message, b=[yes,no] + list(moreButtons), db=default, ma="center", cb=no, ds=no) if moreButtons: return ret else: return (ret==yes) def informBox(title, message, ok="Ok"): """ Information box """ confirmDialog(t=title, m=message, b=["Ok"], db="Ok") class PopupError( Exception ): """Raise this exception in your scripts to cause a promptDialog to be opened displaying the error message. After the user presses 'OK', the exception will be raised as normal. 
In batch mode the promptDialog is not opened.""" def __init__(self, msg): Exception.__init__(self, msg) if not cmds.about(batch=1): ret = informBox('Error', msg) def promptForFolder(): """ Prompt the user for a folder path """ # a little trick that allows us to change the top-level 'folder' variable from # the nested function ('getfolder') - use a single-element list, and change its content folder = [None] def getfolder(*args): folder[0] = args[0] ret = cmds.fileBrowserDialog(m=4, fc=getfolder, an="Get Folder") folder = _Path(folder[0]) if folder.exists(): return folder def promptForPath(**kwargs): """ Prompt the user for a folder path """ if cmds.about(linux=1): return _Path(fileDialog(**kwargs)) else: # a little trick that allows us to change the top-level 'folder' variable from # the nested function ('getfolder') - use a single-element list, and change its content folder = [None] def getfolder(*args): folder[0] = args[0] kwargs.pop('fileCommand',None) kwargs['fc'] = getfolder kwargs['an'] = kwargs.pop('an', kwargs.pop('actionName', "Select File")) ret = cmds.fileBrowserDialog(**kwargs) folder = _Path(folder[0]) if folder.exists(): return folder def fileDialog(*args, **kwargs): ret = cmds.fileDialog(*args, **kwargs ) if ret: return _Path( ret ) def showsHourglass(func): """ Decorator - shows the hourglass cursor until the function returns """ def decoratedFunc(*args, **kwargs): cmds.waitCursor(st=True) try: return func(*args, **kwargs) finally: cmds.waitCursor(st=False) decoratedFunc.__doc__ = func.__doc__ decoratedFunc.__name__ = func.__name__ decoratedFunc.__module__ = func.__module__ return decoratedFunc def pathButtonGrp( name=None, *args, **kwargs ): if name is None or not cmds.textFieldButtonGrp( name, ex=1 ): create = True else: create = False return _uitypes.PathButtonGrp( name=name, create=create, *args, **kwargs ) def vectorFieldGrp( *args, **kwargs ): return _uitypes.VectorFieldGrp( *args, **kwargs ) def uiTemplate(name=None, force=False, exists=None): if exists: return cmds.uiTemplate(name, exists) else: return _uitypes.UITemplate(name=name, force=force) def currentParent(): "shortcut for ``ui.PyUI(setParent(q=1))`` " return _uitypes.PyUI(cmds.setParent(q=1)) # fix a bug it becomes impossible to create a menu after setParent has been called def menu(*args, **kwargs): """ Modifications - added ability to query parent """ if _versions.current() < _versions.v2011: # on create only if not ( kwargs.get('query', False) or kwargs.get('q', False) ) \ and not ( kwargs.get('edit', False) or kwargs.get('e', False) ) \ and not ( kwargs.get('parent', False) or kwargs.get('p', False) ): kwargs['parent'] = cmds.setParent(q=1) if ( kwargs.get('query', False) or kwargs.get('q', False) ) \ and ( kwargs.get('parent', False) or kwargs.get('p', False) ): name = unicode(args[0]) if '|' not in name: name = _findLongName(name, 'menu') return name.rsplit('|',1)[0] return cmds.menu(*args, **kwargs) def _createClassCommands(): def createCallback( classname ): """ create a callback that will trigger lazyLoading """ def callback(*args, **kwargs): res = getattr(_uitypes, classname)(*args, **kwargs) return res return callback for funcName in _factories.uiClassList: # Create Class classname = _util.capitalize(funcName) #cls = _uitypes[classname] # Create Function func = _factories.functionFactory( funcName, createCallback(classname), _thisModule, uiWidget=True ) if func: func.__module__ = __name__ setattr(_thisModule, funcName, func) def _createOtherCommands(): moduleShortName = __name__.split('.')[-1] 
nonClassFuncs = set(_factories.moduleCmds[moduleShortName]).difference(_factories.uiClassList) for funcName in nonClassFuncs: func = _factories.functionFactory( funcName, returnFunc=None, module=_thisModule ) if func: func.__module__ = __name__ setattr(_thisModule, funcName, func) # want this call to work regardless of order we call _createClassCommandParis / _createCommands if sys.modules[__name__] != _thisModule: setattr( sys.modules[__name__], funcName, func ) _createClassCommands() _createOtherCommands() def autoLayout(*args, **kwargs): return _uitypes.AutoLayout(*args, **kwargs) autoLayout.__doc__ = formLayout.__doc__ def subMenuItem(*args, **kwargs): """ shortcut for ``menuItem(subMenu=True)`` """ kwargs['subMenu'] = True return menuItem(*args, **kwargs) #class ValueControlGrp( UI ): # def __new__(cls, name=None, create=False, dataType=None, numberOfControls=1, **kwargs): # # if cls._isBeingCreated(name, create, kwargs): # assert dataType # if not isinstance(dataType, basestring): # try: # dataType = dataType.__name__ # except AttributeError: # dataType = str(dataType) # # # if a dataType such as float3 or int2 was passed, get the number of ctrls # try: # numberOfControls = int(re.search( '(\d+)$', dataType ).group(0)) # except: # pass # # dataType = dataType.lower() # # kwargs.pop('dt',None) # kwargs['docTag'] = dataType ## kwargs.pop('nf', None) ## kwargs['numberOfFields'] = 3 ## name = cmds.floatFieldGrp( name, *args, **kwargs) # # #labelStr = kwargs.pop( 'label', kwargs.pop('l', str(dataType) ) ) # if dataType in ["bool"]: # ctrl = _uitypes.CheckBoxGrp # getter = ctrl.getValue1 # setter = ctrl.setValue1 # #if hasDefault: ctrl.setValue1( int(default) ) # # elif dataType in ["int"]: # ctrl = _uitypes.IntFieldGrp # getter = ctrl.getValue1 # setter = ctrl.setValue1 # #if hasDefault: ctrl.setValue1( int(default) ) # # elif dataType in ["float"]: # ctrl = _uitypes.FloatFieldGrp # getter = ctrl.getValue1 # setter = ctrl.setValue1 # #if hasDefault: ctrl.setValue1( float(default) ) # # elif dataType in ["vector", "Vector"]: # ctrl = VectorFieldGrp # getter = ctrl.getVector # setter = ctrl.setValue1 # #if hasDefault: ctrl.setVector( default ) # # elif dataType in ["path", "Path", "FileReference"]:# or pathreg.search( argName.lower() ): # ctrl = PathButtonGrp # getter = ctrl.getPath # setter = ctrl.setPath # #if hasDefault: ctrl.setText( default.__repr__() ) # # elif dataType in ["string", "unicode", "str"]: # ctrl = _uitypes.TextFieldGrp # getter = ctrl.getText # setter = ctrl.setText # #if hasDefault: ctrl.setText( str(default) ) # else: # raise TypeError ## else: ## ctrl = _uitypes.TextFieldGrp( l=labelStr ) ## getter = makeEvalGetter( ctrl.getText ) ## #setter = ctrl.setValue1 ## #if hasDefault: ctrl.setText( default.__repr__() ) # cls.__melcmd__ = staticmethod( ctrl.__melcmd__ ) # self = ctrl.__new__( cls, name, create, **kwargs ) # self.getter = getter # self.ctrlClass = ctrl # return self # # def getValue(self): # return self.getter(self) def valueControlGrp(name=None, create=False, dataType=None, slider=True, value=None, numberOfControls=1, **kwargs): """ This function allows for a simplified interface for automatically creating UI's to control numeric values. A dictionary of keywords shared by all controls can be created and passed to this function and settings which don't pertain to the element being created will will be ignore. For example, 'precision' will be ignored by all non-float UI and 'sliderSteps' will be ignore by all non-slider UIs. 
:Parameters: dataType : string or class type The dataType that the UI should control. It can be a type object or the string name of the type. For example for a boolean, you can specify 'bool' or pass in the bool class. Also, if the UI is meant to control an array, you can pass the type name as a stirng with a integer suffix representing the array length. ex. 'bool3' numberOfControls : int A parameter for specifying the number of controls per control group. For example, for a checkBoxGrp, numberOfControls will map to the 'numberOfCheckBoxes' keyword. slider : bool Specify whether or not sliders should be used for int and float controls. Ignored for other types, as well as for int and float arrays value : int, int list, bool, bool list, float, float list, string, unicode, Path, Vector, The value for the control. If the value is for an array type, it should be a list or tuple of the appropriate number of elements. A straightforward example: .. python:: settings = {} settings['step'] = 1 settings['precision'] = 3 settings['vertical'] = True # for all checkBoxGrps, lay out vertically win = window() columnLayout() setUITemplate( 'attributeEditorTemplate', pushTemplate=1 ) boolCtr = valueControlGrp( dataType='bool', label='bool', **settings) bool3Ctr = valueControlGrp( dataType='bool', label='bool', numberOfControls=3, **settings) intCtr = valueControlGrp( dataType=int, label='int', slider=False, **settings) intSldr = valueControlGrp( dataType=int, label='int', slider=True, **settings) int3Ctrl= valueControlGrp( dataType=int, label='int', numberOfControls=3, **settings) floatCtr = valueControlGrp( dataType=float, label='float', slider=False, **settings) floatSldr = valueControlGrp( dataType=float, label='float', slider=True, **settings) pathCtrl = valueControlGrp( dataType=Path, label='path', **settings) win.show() Here's an example of how this is meant to be used in practice: .. python:: settings = {} settings['step'] = 1 settings['precision'] = 3 win = window() columnLayout() types=[ ( 'donuts?', bool, True ), # bool arrays have a special label syntax that allow them to pass sub-labels ( [ 'flavors', ['jelly', 'sprinkles', 'glazed']], 'bool3', [0,1,0]), ( 'quantity', int, 12 ), ( 'delivery time', float, .69) ] for label, dt, val in types: valueControlGrp( dataType=dt, label=label, value=val, **settings) win.show() """ def makeGetter( ctrl, methodName, num ): def getter( ): res = [] for i in range( num ): res.append( getattr(ctrl, methodName + str(i+1) )() ) return res return getter def makeSetter( ctrl, methodName, num ): def setter( args ): for i in range( num ): getattr(ctrl, methodName + str(i+1) )(args[i]) return setter # the options below are only valid for certain control types. they can always be passed to valueControlGrp, but # they will be ignore if not applicable to the control for this dataType. 
this allows you to create a # preset configuration and pass it to the valueControlGrp for every dataType -- no need for creating switches, afterall # that's the point of this function sliderArgs = [ 'sliderSteps', 'ss', 'dragCommand', 'dc' ] fieldArgs = [ 'field', 'f', 'fieldStep', 'fs', 'fieldMinValue', 'fmn', 'fieldMaxValue', 'fmx' ] fieldSliderArgs = ['step', 's', 'minValue', 'min', 'maxValue', 'max', 'extraLabel', 'el'] + sliderArgs + fieldArgs floatFieldArgs = ['precision', 'pre'] verticalArgs = ['vertical', 'vr'] #checkBoxGrp and radioButtonGrp only if _uitypes.PyUI._isBeingCreated(name, create, kwargs): assert dataType, "You must pass a dataType when creating a new control" if not isinstance(dataType, basestring): try: dataType = dataType.__name__ except AttributeError: dataType = str(dataType) # if a dataType such as float3 or int2 was passed, get the number of ctrls try: buf = re.split( '(\d+)', dataType ) dataType = buf[0] numberOfControls = int(buf[1]) except: pass else: # control command lets us get basic info even when we don't know the ui type dataType = control( name, q=1, docTag=1) assert dataType numberOfControls = int(numberOfControls) if numberOfControls < 1: numberOfControls = 1 elif numberOfControls > 4: numberOfControls = 4 #dataType = dataType.lower() kwargs.pop('dt',None) kwargs['docTag'] = dataType if dataType in ["bool"]: if numberOfControls > 1: kwargs.pop('ncb', None) kwargs['numberOfCheckBoxes'] = numberOfControls # remove field/slider and float kwargs for arg in fieldSliderArgs + floatFieldArgs: kwargs.pop(arg, None) # special label handling label = kwargs.get('label', kwargs.get('l',None) ) if label is not None: # allow label passing with additional sub-labels: # ['mainLabel', ['subLabel1', 'subLabel2', 'subLabel3']] if _util.isIterable(label): label, labelArray = label kwargs.pop('l',None) kwargs['label'] = label kwargs['labelArray' + str(numberOfControls) ] = labelArray ctrl = _uitypes.CheckBoxGrp( name, create, **kwargs ) if numberOfControls > 1: getter = makeGetter(ctrl, 'getValue', numberOfControls) setter = makeSetter(ctrl, 'setValue', numberOfControls) else: getter = ctrl.getValue1 setter = ctrl.setValue1 #if hasDefault: ctrl.setValue1( int(default) ) elif dataType in ["int"]: if numberOfControls > 1: kwargs.pop('nf', None) kwargs['numberOfFields'] = numberOfControls slider = False if slider: # remove float kwargs for arg in floatFieldArgs + verticalArgs: kwargs.pop(arg, None) # turn the field on by default if 'field' not in kwargs and 'f' not in kwargs: kwargs['field'] = True ctrl = _uitypes.IntSliderGrp( name, create, **kwargs ) getter = ctrl.getValue setter = ctrl.setValue else: # remove field/slider and float kwargs for arg in fieldSliderArgs + floatFieldArgs + verticalArgs: kwargs.pop(arg, None) ctrl = _uitypes.IntFieldGrp( name, create, **kwargs ) getter = ctrl.getValue1 setter = ctrl.setValue1 #if hasDefault: ctrl.setValue1( int(default) ) elif dataType in ["float"]: if numberOfControls > 1: kwargs.pop('nf', None) kwargs['numberOfFields'] = numberOfControls slider = False if slider: for arg in verticalArgs: kwargs.pop(arg, None) # turn the field on by default if 'field' not in kwargs and 'f' not in kwargs: kwargs['field'] = True ctrl = _uitypes.FloatSliderGrp( name, create, **kwargs ) getter = ctrl.getValue setter = ctrl.setValue else: # remove field/slider kwargs for arg in fieldSliderArgs + verticalArgs: kwargs.pop(arg, None) ctrl = _uitypes.FloatFieldGrp( name, create, **kwargs ) getter = ctrl.getValue1 setter = ctrl.setValue1 #if hasDefault: 
ctrl.setValue1( float(default) ) elif dataType in ["vector", "Vector"]: # remove field/slider kwargs for arg in fieldSliderArgs + floatFieldArgs + verticalArgs: kwargs.pop(arg, None) ctrl = VectorFieldGrp( name, create, **kwargs ) getter = ctrl.getVector setter = ctrl.setValue1 #if hasDefault: ctrl.setVector( default ) elif dataType in ["path", "Path", "FileReference"]:# or pathreg.search( argName.lower() ): # remove field/slider kwargs for arg in fieldSliderArgs + floatFieldArgs + verticalArgs: kwargs.pop(arg, None) ctrl = PathButtonGrp( name, create, **kwargs ) getter = ctrl.getPath setter = ctrl.setPath #if hasDefault: ctrl.setText( default.__repr__() ) elif dataType in ["string", "unicode", "str"]: # remove field/slider kwargs for arg in fieldSliderArgs + floatFieldArgs + verticalArgs: kwargs.pop(arg, None) ctrl = _uitypes.TextFieldGrp( name, create, **kwargs ) getter = ctrl.getText setter = ctrl.setText #if hasDefault: ctrl.setText( str(default) ) else: raise TypeError, "Unsupported dataType: %s" % dataType # else: # ctrl = _uitypes.TextFieldGrp( l=labelStr ) # getter = makeEvalGetter( ctrl.getText ) # #setter = ctrl.setValue1 # #if hasDefault: ctrl.setText( default.__repr__() ) #new = ctrl( name, create, **kwargs ) ctrl.getValue = getter ctrl.setValue = setter ctrl.dataType = ctrl.getDocTag if value is not None: ctrl.setValue(value) # TODO : remove setDocTag return ctrl def getMainProgressBar(): return _uitypes.ProgressBar(melGlobals['gMainProgressBar'])
bsd-3-clause
-7,241,206,425,842,282,000
35.779783
142
0.584348
false
qxf2/qxf2-page-object-model
utils/gmail/message.py
1
7976
import datetime
import email
import re
import time
import os
from email.header import decode_header, make_header
from imaplib import ParseFlags


class Message():

    def __init__(self, mailbox, uid):
        self.uid = uid
        self.mailbox = mailbox
        self.gmail = mailbox.gmail if mailbox else None

        self.message = None
        self.headers = {}

        self.subject = None
        self.body = None
        self.html = None

        self.to = None
        self.fr = None
        self.cc = None
        self.delivered_to = None

        self.sent_at = None

        self.flags = []
        self.labels = []
        self.thread_id = None
        self.thread = []
        self.message_id = None

        self.attachments = None

    def is_read(self):
        return ('\\Seen' in self.flags)

    def read(self):
        flag = '\\Seen'
        self.gmail.imap.uid('STORE', self.uid, '+FLAGS', flag)
        if flag not in self.flags: self.flags.append(flag)

    def unread(self):
        flag = '\\Seen'
        self.gmail.imap.uid('STORE', self.uid, '-FLAGS', flag)
        if flag in self.flags: self.flags.remove(flag)

    def is_starred(self):
        return ('\\Flagged' in self.flags)

    def star(self):
        flag = '\\Flagged'
        self.gmail.imap.uid('STORE', self.uid, '+FLAGS', flag)
        if flag not in self.flags: self.flags.append(flag)

    def unstar(self):
        flag = '\\Flagged'
        self.gmail.imap.uid('STORE', self.uid, '-FLAGS', flag)
        if flag in self.flags: self.flags.remove(flag)

    def is_draft(self):
        return ('\\Draft' in self.flags)

    def has_label(self, label):
        full_label = '%s' % label
        return (full_label in self.labels)

    def add_label(self, label):
        full_label = '%s' % label
        self.gmail.imap.uid('STORE', self.uid, '+X-GM-LABELS', full_label)
        if full_label not in self.labels: self.labels.append(full_label)

    def remove_label(self, label):
        full_label = '%s' % label
        self.gmail.imap.uid('STORE', self.uid, '-X-GM-LABELS', full_label)
        if full_label in self.labels: self.labels.remove(full_label)

    def is_deleted(self):
        return ('\\Deleted' in self.flags)

    def delete(self):
        flag = '\\Deleted'
        self.gmail.imap.uid('STORE', self.uid, '+FLAGS', flag)
        if flag not in self.flags: self.flags.append(flag)

        trash = '[Gmail]/Trash' if '[Gmail]/Trash' in self.gmail.labels() else '[Gmail]/Bin'
        if self.mailbox.name not in ['[Gmail]/Bin', '[Gmail]/Trash']:
            self.move_to(trash)

    # def undelete(self):
    #     flag = '\\Deleted'
    #     self.gmail.imap.uid('STORE', self.uid, '-FLAGS', flag)
    #     if flag in self.flags: self.flags.remove(flag)

    def move_to(self, name):
        self.gmail.copy(self.uid, name, self.mailbox.name)
        if name not in ['[Gmail]/Bin', '[Gmail]/Trash']:
            self.delete()

    def archive(self):
        self.move_to('[Gmail]/All Mail')

    def parse_headers(self, message):
        hdrs = {}
        for hdr in message.keys():
            hdrs[hdr] = message[hdr]
        return hdrs

    def parse_flags(self, headers):
        return list(ParseFlags(headers))
        # flags = re.search(r'FLAGS \(([^\)]*)\)', headers).groups(1)[0].split(' ')

    def parse_labels(self, headers):
        if re.search(r'X-GM-LABELS \(([^\)]+)\)', headers):
            labels = re.search(r'X-GM-LABELS \(([^\)]+)\)', headers).groups(1)[0].split(' ')
            return map(lambda l: l.replace('"', '').decode("string_escape"), labels)
        else:
            return list()

    def parse_subject(self, encoded_subject):
        dh = decode_header(encoded_subject)
        default_charset = 'ASCII'
        return ''.join([ unicode(t[0], t[1] or default_charset) for t in dh ])

    def parse(self, raw_message):
        raw_headers = raw_message[0]
        raw_email = raw_message[1]

        self.message = email.message_from_string(raw_email)
        self.headers = self.parse_headers(self.message)

        self.to = self.message['to']
        self.fr = self.message['from']
        self.delivered_to = self.message['delivered_to']

        self.subject = self.parse_subject(self.message['subject'])

        if self.message.get_content_maintype() == "multipart":
            for content in self.message.walk():
                if content.get_content_type() == "text/plain":
                    self.body = content.get_payload(decode=True)
                elif content.get_content_type() == "text/html":
                    self.html = content.get_payload(decode=True)
        elif self.message.get_content_maintype() == "text":
            self.body = self.message.get_payload()

        self.sent_at = datetime.datetime.fromtimestamp(time.mktime(email.utils.parsedate_tz(self.message['date'])[:9]))

        self.flags = self.parse_flags(raw_headers)

        self.labels = self.parse_labels(raw_headers)

        if re.search(r'X-GM-THRID (\d+)', raw_headers):
            self.thread_id = re.search(r'X-GM-THRID (\d+)', raw_headers).groups(1)[0]
        if re.search(r'X-GM-MSGID (\d+)', raw_headers):
            self.message_id = re.search(r'X-GM-MSGID (\d+)', raw_headers).groups(1)[0]

        # Parse attachments into attachment objects array for this message
        self.attachments = [
            Attachment(attachment) for attachment in self.message._payload
            if not isinstance(attachment, basestring) and attachment.get('Content-Disposition') is not None
        ]

    def fetch(self):
        if not self.message:
            response, results = self.gmail.imap.uid('FETCH', self.uid, '(BODY.PEEK[] FLAGS X-GM-THRID X-GM-MSGID X-GM-LABELS)')

            self.parse(results[0])

        return self.message

    # returns a list of fetched messages (both sent and received) in chronological order
    def fetch_thread(self):
        self.fetch()
        original_mailbox = self.mailbox
        self.gmail.use_mailbox(original_mailbox.name)

        # fetch and cache messages from inbox or other received mailbox
        response, results = self.gmail.imap.uid('SEARCH', None, '(X-GM-THRID ' + self.thread_id + ')')
        received_messages = {}
        uids = results[0].split(' ')
        if response == 'OK':
            for uid in uids:
                received_messages[uid] = Message(original_mailbox, uid)
            self.gmail.fetch_multiple_messages(received_messages)
            self.mailbox.messages.update(received_messages)

        # fetch and cache messages from 'sent'
        self.gmail.use_mailbox('[Gmail]/Sent Mail')
        response, results = self.gmail.imap.uid('SEARCH', None, '(X-GM-THRID ' + self.thread_id + ')')
        sent_messages = {}
        uids = results[0].split(' ')
        if response == 'OK':
            for uid in uids:
                sent_messages[uid] = Message(self.gmail.mailboxes['[Gmail]/Sent Mail'], uid)
            self.gmail.fetch_multiple_messages(sent_messages)
            self.gmail.mailboxes['[Gmail]/Sent Mail'].messages.update(sent_messages)

        self.gmail.use_mailbox(original_mailbox.name)

        # combine and sort sent and received messages
        return sorted(dict(received_messages.items() + sent_messages.items()).values(), key=lambda m: m.sent_at)


class Attachment:

    def __init__(self, attachment):
        self.name = attachment.get_filename()

        # Raw file data
        self.payload = attachment.get_payload(decode=True)

        # Filesize in kilobytes
        self.size = int(round(len(self.payload)/1000.0))

    def save(self, path=None):
        if path is None:
            # Save as name of attachment if there is no path specified
            path = self.name
        elif os.path.isdir(path):
            # If the path is a directory, save as name of attachment in that directory
            path = os.path.join(path, self.name)

        with open(path, 'wb') as f:
            f.write(self.payload)
mit
-7,371,277,480,869,057,000
33.08547
127
0.589895
false
vdloo/jobrunner
tests/unit/jobrunner/log/test_setup_logging.py
1
1393
from logging import INFO, DEBUG
from sys import stdout

from jobrunner.log import setup_logging
from tests.testcase import TestCase


class TestSetupLogging(TestCase):
    def setUp(self):
        self.get_logger = self.set_up_patch('jobrunner.log.getLogger')
        self.stream_handler = self.set_up_patch('jobrunner.log.StreamHandler')

    def test_setup_logging_gets_logger(self):
        setup_logging()

        self.get_logger.assert_called_once_with('jobrunner')

    def test_setup_logging_sets_logging_level_to_info_by_default(self):
        setup_logging()

        self.get_logger.return_value.setLevel.assert_called_once_with(INFO)

    def test_setup_logging_sets_logging_level_to_debug_if_debug_is_specified(self):
        setup_logging(debug=True)

        self.get_logger.return_value.setLevel.assert_called_once_with(DEBUG)

    def test_setup_logging_instantiates_stream_handler_with_stdout(self):
        setup_logging(debug=True)

        self.stream_handler.assert_called_once_with(stdout)

    def test_setup_logging_adds_console_handler_to_logger(self):
        setup_logging(debug=True)

        self.get_logger.return_value.addHandler.assert_called_once_with(
            self.stream_handler.return_value
        )

    def test_setup_logging_returns_logger(self):
        ret = setup_logging(debug=True)

        self.assertEqual(ret, self.get_logger.return_value)
apache-2.0
5,715,042,588,193,897,000
31.395349
83
0.704953
false
darbula/django-onlinejudge
onlinejudge/middleware.py
1
1419
from __future__ import absolute_import

from django.http import HttpResponseForbidden

from .settings import OJ_IP_FILTER, OJ_USERS_IPS, OJ_PROXY_SIGNATURE, \
    OJ_REMOTE_ADDR


def get_ip(request):
    return request.META.get("HTTP_X_FORWARDED_FOR", "").strip().split(',')[0]


def get_proxy(request):
    return request.META.get("HTTP_VIA", "").strip().split(',')[0]


def get_remote_addr(request):
    return request.META.get("REMOTE_ADDR", "").strip()


class UserIPFilter(object):
    def process_request(self, request):
        if not OJ_IP_FILTER:
            return None
        #TODO: check if user is in request
        if request.user.is_superuser:
            return None
        for_user_ip = dict(OJ_USERS_IPS).get(request.user.username, None)
        real_ip = get_ip(request)
        user_is_inside = real_ip in dict(OJ_USERS_IPS).values()
        #protect access to other accounts from the inside
        if (user_is_inside and not request.user.is_anonymous() and
                for_user_ip!=real_ip):
            return HttpResponseForbidden()
        #protect access to the user account from the outside
        if for_user_ip is None:
            return None
        proxy = get_proxy(request)
        remote_addr = get_remote_addr(request)
        if (for_user_ip!=real_ip or proxy!=OJ_PROXY_SIGNATURE or
                remote_addr!=OJ_REMOTE_ADDR):
            return HttpResponseForbidden()
bsd-3-clause
1,660,031,398,861,833,200
32
77
0.631431
false
the-zebulan/CodeWars
tests/kyu_7_tests/test_every_nth_array_element_basic.py
1
2278
import unittest

from katas.kyu_7.every_nth_array_element_basic import every


class EveryNthElementTestCase(unittest.TestCase):
    def setUp(self):
        self.lst = [0, 1, 2, 3, 4]
        self.lst2 = list('test')
        self.lst3 = [None, 1, ['two'], 'three', {4: 'IV'}]

    def test_equal_1(self):
        self.assertEqual(every(self.lst, 1), [0, 1, 2, 3, 4])

    def test_equal_2(self):
        self.assertEqual(every(self.lst, 2), [0, 2, 4])

    def test_equal_3(self):
        self.assertEqual(every(self.lst, 3), [0, 3])

    def test_equal_4(self):
        self.assertEqual(every(self.lst, 4), [0, 4])

    def test_equal_5(self):
        self.assertEqual(every(self.lst, 5), [0])

    def test_equal_6(self):
        self.assertEqual(every(self.lst, 1, 1), [1, 2, 3, 4])

    def test_equal_7(self):
        self.assertEqual(every(self.lst, 2, 1), [1, 3])

    def test_equal_8(self):
        self.assertEqual(every(self.lst, 3, 1), [1, 4])

    def test_equal_9(self):
        self.assertEqual(every(self.lst, 4, 1), [1])

    def test_equal_10(self):
        self.assertEqual(every(self.lst, 5, 1), [1])

    def test_equal_11(self):
        self.assertEqual(every(self.lst), [0, 1, 2, 3, 4])

    def test_equal_12(self):
        self.assertEqual(every(self.lst, 1), [0, 1, 2, 3, 4])

    def test_equal_13(self):
        self.assertEqual(every(self.lst, 2), [0, 2, 4])

    def test_equal_14(self):
        self.assertEqual(every(self.lst, 3), [0, 3])

    def test_equal_15(self):
        self.assertEqual(every(self.lst, 1, 3), [3, 4])

    def test_equal_16(self):
        self.assertEqual(every(self.lst, 3, 1), [1, 4])

    def test_equal_17(self):
        self.assertEqual(every(self.lst, 3, 4), [4])

    def test_equal_18(self):
        self.assertEqual(every(self.lst2), ['t', 'e', 's', 't'])

    def test_equal_19(self):
        self.assertEqual(every(self.lst2, 2), ['t', 's'])

    def test_equal_20(self):
        self.assertEqual(every(self.lst2, 2, 1), ['e', 't'])

    def test_equal_21(self):
        self.assertEqual(every(self.lst3, 1), self.lst3)

    def test_equal_22(self):
        self.assertEqual(every(self.lst3, 2, 2), [['two'], {4: 'IV'}])

    def test_equal_23(self):
        self.assertEqual(every([None] * 5, 2), [None] * 3)
mit
-18,779,965,900,731,492
27.835443
70
0.571115
false
devs1991/test_edx_docmode
common/djangoapps/student/views.py
1
102710
""" Student Views """ import datetime import logging import uuid import json import warnings from collections import defaultdict from urlparse import urljoin from pytz import UTC from requests import HTTPError from ipware.ip import get_ip from django.conf import settings from django.contrib.auth import logout, authenticate, login from django.contrib.auth.models import User, AnonymousUser from django.contrib.auth.decorators import login_required from django.contrib.auth.views import password_reset_confirm from django.contrib import messages from django.core.context_processors import csrf from django.core import mail from django.core.urlresolvers import reverse, NoReverseMatch from django.core.validators import validate_email, ValidationError from django.db import IntegrityError, transaction from django.http import (HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseServerError, Http404) from django.shortcuts import redirect from django.utils.encoding import force_bytes, force_text from django.utils.translation import ungettext from django.utils.http import base36_to_int, urlsafe_base64_encode from django.utils.translation import ugettext as _, get_language from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie from django.views.decorators.http import require_POST, require_GET from django.db.models.signals import post_save from django.dispatch import receiver from django.template.response import TemplateResponse from ratelimitbackend.exceptions import RateLimitException from social.apps.django_app import utils as social_utils from social.backends import oauth as social_oauth from social.exceptions import AuthException, AuthAlreadyAssociated from edxmako.shortcuts import render_to_response, render_to_string from course_modes.models import CourseMode from shoppingcart.api import order_history from student.models import ( Registration, UserProfile, PendingEmailChange, CourseEnrollment, CourseEnrollmentAttribute, unique_id_for_user, CourseEnrollmentAllowed, UserStanding, LoginFailures, create_comments_service_user, PasswordHistory, UserSignupSource, DashboardConfiguration, LinkedInAddToProfileConfiguration, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED) from student.forms import AccountCreationForm, PasswordResetFormNoActive, get_registration_extension_form from lms.djangoapps.reg_form.forms import regextrafields from lms.djangoapps.commerce.utils import EcommerceService # pylint: disable=import-error from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification # pylint: disable=import-error from certificates.models import CertificateStatuses, certificate_status_for_student from certificates.api import ( # pylint: disable=import-error get_certificate_url, has_html_certificates_enabled, ) from xmodule.modulestore.django import modulestore from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey from opaque_keys.edx.locations import SlashSeparatedCourseKey from opaque_keys.edx.locator import CourseLocator from xmodule.modulestore import ModuleStoreEnum from collections import namedtuple from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date # pylint: disable=import-error from courseware.access import has_access from django_comment_common.models import Role from external_auth.models import ExternalAuthMap import external_auth.views from external_auth.login_and_register import ( login as external_auth_login, register as external_auth_register ) from bulk_email.models import 
Optout, CourseAuthorization from lang_pref import LANGUAGE_KEY import track.views import dogstats_wrapper as dog_stats_api from util.db import outer_atomic from util.json_request import JsonResponse from util.bad_request_rate_limiter import BadRequestRateLimiter from util.milestones_helpers import ( get_pre_requisite_courses_not_completed, ) from microsite_configuration import microsite from util.password_policy_validators import ( validate_password_length, validate_password_complexity, validate_password_dictionary ) import third_party_auth from third_party_auth import pipeline, provider from student.helpers import ( check_verify_status_by_course, auth_pipeline_urls, get_next_url_for_login_page, DISABLE_UNENROLL_CERT_STATES, ) from student.cookies import set_logged_in_cookies, delete_logged_in_cookies from student.models import anonymous_id_for_user from shoppingcart.models import DonationConfiguration, CourseRegistrationCode from embargo import api as embargo_api import analytics from eventtracking import tracker # Note that this lives in LMS, so this dependency should be refactored. from notification_prefs.views import enable_notifications # Note that this lives in openedx, so this dependency should be refactored. from openedx.core.djangoapps.credentials.utils import get_user_program_credentials from openedx.core.djangoapps.credit.email_utils import get_credit_provider_display_names, make_providers_strings from openedx.core.djangoapps.user_api.preferences import api as preferences_api from openedx.core.djangoapps.programs.utils import get_programs_for_dashboard from openedx.core.djangoapps.programs.models import ProgramsApiConfig log = logging.getLogger("edx.student") AUDIT_LOG = logging.getLogger("audit") ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display') # pylint: disable=invalid-name SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated' # Disable this warning because it doesn't make sense to completely refactor tests to appease Pylint # pylint: disable=logging-format-interpolation def csrf_token(context): """A csrf token that can be included in a form.""" token = context.get('csrf_token', '') if token == 'NOTPROVIDED': return '' return (u'<div style="display:none"><input type="hidden"' ' name="csrfmiddlewaretoken" value="%s" /></div>' % (token)) # NOTE: This view is not linked to directly--it is called from # branding/views.py:index(), which is cached for anonymous users. # This means that it should always return the same thing for anon # users. (in particular, no switching based on query params allowed) def index(request, extra_context=None, user=AnonymousUser()): """ Render the edX main page. extra_context is used to allow immediate display of certain modal windows, eg signup, as used by external_auth. """ if extra_context is None: extra_context = {} courses = get_courses(user) if microsite.get_value("ENABLE_COURSE_SORTING_BY_START_DATE", settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"]): courses = sort_by_start_date(courses) else: courses = sort_by_announcement(courses) context = {'courses': courses} context['homepage_overlay_html'] = microsite.get_value('homepage_overlay_html') # This appears to be an unused context parameter, at least for the master templates... 
context['show_partners'] = microsite.get_value('show_partners', True) # TO DISPLAY A YOUTUBE WELCOME VIDEO # 1) Change False to True context['show_homepage_promo_video'] = microsite.get_value('show_homepage_promo_video', False) # 2) Add your video's YouTube ID (11 chars, eg "123456789xX"), or specify via microsite config # Note: This value should be moved into a configuration setting and plumbed-through to the # context via the microsite configuration workflow, versus living here youtube_video_id = microsite.get_value('homepage_promo_video_youtube_id', "your-youtube-id") context['homepage_promo_video_youtube_id'] = youtube_video_id # allow for microsite override of the courses list context['courses_list'] = microsite.get_template_path('courses_list.html') # Insert additional context for use in the template context.update(extra_context) return render_to_response('index.html', context) def process_survey_link(survey_link, user): """ If {UNIQUE_ID} appears in the link, replace it with a unique id for the user. Currently, this is sha1(user.username). Otherwise, return survey_link. """ return survey_link.format(UNIQUE_ID=unique_id_for_user(user)) def cert_info(user, course_overview, course_mode): """ Get the certificate info needed to render the dashboard section for the given student and course. Arguments: user (User): A user. course_overview (CourseOverview): A course. course_mode (str): The enrollment mode (honor, verified, audit, etc.) Returns: dict: Empty dict if certificates are disabled or hidden, or a dictionary with keys: 'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted' 'show_download_url': bool 'download_url': url, only present if show_download_url is True 'show_disabled_download_button': bool -- true if state is 'generating' 'show_survey_button': bool 'survey_url': url, only if show_survey_button is True 'grade': if status is not 'processing' 'can_unenroll': if status allows for unenrollment """ if not course_overview.may_certify(): return {} return _cert_info( user, course_overview, certificate_status_for_student(user, course_overview.id), course_mode ) def reverification_info(statuses): """ Returns reverification-related information for *all* of user's enrollments whose reverification status is in statuses. Args: statuses (list): a list of reverification statuses we want information for example: ["must_reverify", "denied"] Returns: dictionary of lists: dictionary with one key per status, e.g. dict["must_reverify"] = [] dict["must_reverify"] = [some information] """ reverifications = defaultdict(list) # Sort the data by the reverification_end_date for status in statuses: if reverifications[status]: reverifications[status].sort(key=lambda x: x.date) return reverifications def get_course_enrollments(user, org_to_include, orgs_to_exclude): """ Given a user, return a filtered set of his or her course enrollments. Arguments: user (User): the user in question. org_to_include (str): for use in Microsites. If not None, ONLY courses of this org will be returned. orgs_to_exclude (list[str]): If org_to_include is not None, this argument is ignored. Else, courses of this org will be excluded. Returns: generator[CourseEnrollment]: a sequence of enrollments to be displayed on the user's dashboard. """ for enrollment in CourseEnrollment.enrollments_for_user(user): # If the course is missing or broken, log an error and skip it. 
course_overview = enrollment.course_overview if not course_overview: log.error( "User %s enrolled in broken or non-existent course %s", user.username, enrollment.course_id ) continue # If we are in a Microsite, then filter out anything that is not # attributed (by ORG) to that Microsite. if org_to_include and course_overview.location.org != org_to_include: continue # Conversely, if we are not in a Microsite, then filter out any enrollments # with courses attributed (by ORG) to Microsites. elif course_overview.location.org in orgs_to_exclude: continue # Else, include the enrollment. else: yield enrollment def _cert_info(user, course_overview, cert_status, course_mode): # pylint: disable=unused-argument """ Implements the logic for cert_info -- split out for testing. Arguments: user (User): A user. course_overview (CourseOverview): A course. course_mode (str): The enrollment mode (honor, verified, audit, etc.) """ # simplify the status for the template using this lookup table template_state = { CertificateStatuses.generating: 'generating', CertificateStatuses.regenerating: 'generating', CertificateStatuses.downloadable: 'ready', CertificateStatuses.notpassing: 'notpassing', CertificateStatuses.restricted: 'restricted', CertificateStatuses.auditing: 'auditing', CertificateStatuses.audit_passing: 'auditing', CertificateStatuses.audit_notpassing: 'auditing', } default_status = 'processing' default_info = { 'status': default_status, 'show_disabled_download_button': False, 'show_download_url': False, 'show_survey_button': False, 'can_unenroll': True, } if cert_status is None: return default_info is_hidden_status = cert_status['status'] in ('unavailable', 'processing', 'generating', 'notpassing', 'auditing') if course_overview.certificates_display_behavior == 'early_no_info' and is_hidden_status: return {} status = template_state.get(cert_status['status'], default_status) status_dict = { 'status': status, 'show_download_url': status == 'ready', 'show_disabled_download_button': status == 'generating', 'mode': cert_status.get('mode', None), 'linked_in_url': None, 'can_unenroll': status not in DISABLE_UNENROLL_CERT_STATES, } if (status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing') and course_overview.end_of_course_survey_url is not None): status_dict.update({ 'show_survey_button': True, 'survey_url': process_survey_link(course_overview.end_of_course_survey_url, user)}) else: status_dict['show_survey_button'] = False if status == 'ready': # showing the certificate web view button if certificate is ready state and feature flags are enabled. if has_html_certificates_enabled(course_overview.id, course_overview): if course_overview.has_any_active_web_certificate: status_dict.update({ 'show_cert_web_view': True, 'cert_web_view_url': get_certificate_url(course_id=course_overview.id, uuid=cert_status['uuid']) }) else: # don't show download certificate button if we don't have an active certificate for course status_dict['show_download_url'] = False elif 'download_url' not in cert_status: log.warning( u"User %s has a downloadable cert for %s, but no download url", user.username, course_overview.id ) return default_info else: status_dict['download_url'] = cert_status['download_url'] # If enabled, show the LinkedIn "add to profile" button # Clicking this button sends the user to LinkedIn where they # can add the certificate information to their profile. 
linkedin_config = LinkedInAddToProfileConfiguration.current() # posting certificates to LinkedIn is not currently # supported in microsites/White Labels if linkedin_config.enabled and not microsite.is_request_in_microsite(): status_dict['linked_in_url'] = linkedin_config.add_to_profile_url( course_overview.id, course_overview.display_name, cert_status.get('mode'), cert_status['download_url'] ) if status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing'): if 'grade' not in cert_status: # Note: as of 11/20/2012, we know there are students in this state-- cs169.1x, # who need to be regraded (we weren't tracking 'notpassing' at first). # We can add a log.warning here once we think it shouldn't happen. return default_info else: status_dict['grade'] = cert_status['grade'] return status_dict @ensure_csrf_cookie def signin_user(request): """Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`.""" external_auth_response = external_auth_login(request) if external_auth_response is not None: return external_auth_response # Determine the URL to redirect to following login: redirect_to = get_next_url_for_login_page(request) if request.user.is_authenticated(): return redirect(redirect_to) third_party_auth_error = None for msg in messages.get_messages(request): if msg.extra_tags.split()[0] == "social-auth": # msg may or may not be translated. Try translating [again] in case we are able to: third_party_auth_error = _(unicode(msg)) # pylint: disable=translation-of-non-string break context = { 'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header # Bool injected into JS to submit form if we're inside a running third- # party auth pipeline; distinct from the actual instance of the running # pipeline, if any. 'pipeline_running': 'true' if pipeline.running(request) else 'false', 'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to), 'platform_name': microsite.get_value( 'platform_name', settings.PLATFORM_NAME ), 'third_party_auth_error': third_party_auth_error } return render_to_response('login.html', context) @ensure_csrf_cookie def register_user(request, extra_context=None): """Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`.""" # Determine the URL to redirect to following login: redirect_to = get_next_url_for_login_page(request) if request.user.is_authenticated(): return redirect(redirect_to) external_auth_response = external_auth_register(request) if external_auth_response is not None: return external_auth_response context = { 'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header 'email': '', 'name': '', 'running_pipeline': None, 'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, redirect_url=redirect_to), 'platform_name': microsite.get_value( 'platform_name', settings.PLATFORM_NAME ), 'selected_provider': '', 'username': '', } if extra_context is not None: context.update(extra_context) if context.get("extauth_domain", '').startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX): return render_to_response('register-shib.html', context) # If third-party auth is enabled, prepopulate the form with data from the # selected provider. 
if third_party_auth.is_enabled() and pipeline.running(request): running_pipeline = pipeline.get(request) current_provider = provider.Registry.get_from_pipeline(running_pipeline) if current_provider is not None: overrides = current_provider.get_register_form_data(running_pipeline.get('kwargs')) overrides['running_pipeline'] = running_pipeline overrides['selected_provider'] = current_provider.name context.update(overrides) return render_to_response('register.html', context) def complete_course_mode_info(course_id, enrollment, modes=None): """ We would like to compute some more information from the given course modes and the user's current enrollment Returns the given information: - whether to show the course upsell information - numbers of days until they can't upsell anymore """ if modes is None: modes = CourseMode.modes_for_course_dict(course_id) mode_info = {'show_upsell': False, 'days_for_upsell': None} # we want to know if the user is already enrolled as verified or credit and # if verified is an option. if CourseMode.VERIFIED in modes and enrollment.mode in CourseMode.UPSELL_TO_VERIFIED_MODES: mode_info['show_upsell'] = True mode_info['verified_sku'] = modes['verified'].sku # if there is an expiration date, find out how long from now it is if modes['verified'].expiration_datetime: today = datetime.datetime.now(UTC).date() mode_info['days_for_upsell'] = (modes['verified'].expiration_datetime.date() - today).days return mode_info def is_course_blocked(request, redeemed_registration_codes, course_key): """Checking either registration is blocked or not .""" blocked = False for redeemed_registration in redeemed_registration_codes: # registration codes may be generated via Bulk Purchase Scenario # we have to check only for the invoice generated registration codes # that their invoice is valid or not if redeemed_registration.invoice_item: if not redeemed_registration.invoice_item.invoice.is_valid: blocked = True # disabling email notifications for unpaid registration courses Optout.objects.get_or_create(user=request.user, course_id=course_key) log.info( u"User %s (%s) opted out of receiving emails from course %s", request.user.username, request.user.email, course_key, ) track.views.server_track( request, "change-email1-settings", {"receive_emails": "no", "course": course_key.to_deprecated_string()}, page='dashboard', ) break return blocked @login_required @ensure_csrf_cookie def dashboard(request): user = request.user platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME) # for microsites, we want to filter and only show enrollments for courses within # the microsites 'ORG' course_org_filter = microsite.get_value('course_org_filter') # Let's filter out any courses in an "org" that has been declared to be # in a Microsite org_filter_out_set = microsite.get_all_orgs() # remove our current Microsite from the "filter out" list, if applicable if course_org_filter: org_filter_out_set.remove(course_org_filter) # Build our (course, enrollment) list for the user, but ignore any courses that no # longer exist (because the course IDs have changed). Still, we don't delete those # enrollments, because it could have been a data push snafu. 
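# get_course_enrollments (defined above) already skips enrollments whose course cannot be loaded and applies the Microsite org filtering.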
course_enrollments = list(get_course_enrollments(user, course_org_filter, org_filter_out_set)) # sort the enrollment pairs by the enrollment date course_enrollments.sort(key=lambda x: x.created, reverse=True) # Retrieve the course modes for each course enrolled_course_ids = [enrollment.course_id for enrollment in course_enrollments] __, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids) course_modes_by_course = { course_id: { mode.slug: mode for mode in modes } for course_id, modes in unexpired_course_modes.iteritems() } # Check to see if the student has recently enrolled in a course. # If so, display a notification message confirming the enrollment. enrollment_message = _create_recent_enrollment_message( course_enrollments, course_modes_by_course ) course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True) message = "" if not user.is_active: message = render_to_string( 'registration/activate_account_notice.html', {'email': user.email, 'platform_name': platform_name} ) # Global staff can see what courses errored on their dashboard staff_access = False errored_courses = {} if has_access(user, 'staff', 'global'): # Show any courses that errored on load staff_access = True errored_courses = modulestore().get_errored_courses() show_courseware_links_for = frozenset( enrollment.course_id for enrollment in course_enrollments if has_access(request.user, 'load', enrollment.course_overview) and has_access(request.user, 'view_courseware_with_prerequisites', enrollment.course_overview) ) # Get any programs associated with courses being displayed. # This is passed along in the template context to allow rendering of # program-related information on the dashboard. course_programs = _get_course_programs(user, [enrollment.course_id for enrollment in course_enrollments]) xseries_credentials = _get_xseries_credentials(user) # Construct a dictionary of course mode information # used to render the course list. We re-use the course modes dict # we loaded earlier to avoid hitting the database. course_mode_info = { enrollment.course_id: complete_course_mode_info( enrollment.course_id, enrollment, modes=course_modes_by_course[enrollment.course_id] ) for enrollment in course_enrollments } # Determine the per-course verification status # This is a dictionary in which the keys are course locators # and the values are one of: # # VERIFY_STATUS_NEED_TO_VERIFY # VERIFY_STATUS_SUBMITTED # VERIFY_STATUS_APPROVED # VERIFY_STATUS_MISSED_DEADLINE # # Each of which correspond to a particular message to display # next to the course on the dashboard. # # If a course is not included in this dictionary, # there is no verification messaging to display. 
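# For example (illustrative): {<CourseLocator>: VERIFY_STATUS_NEED_TO_VERIFY}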
verify_status_by_course = check_verify_status_by_course(user, course_enrollments) cert_statuses = { enrollment.course_id: cert_info(request.user, enrollment.course_overview, enrollment.mode) for enrollment in course_enrollments } # only show email settings for Mongo course and when bulk email is turned on show_email_settings_for = frozenset( enrollment.course_id for enrollment in course_enrollments if ( settings.FEATURES['ENABLE_INSTRUCTOR_EMAIL'] and modulestore().get_modulestore_type(enrollment.course_id) != ModuleStoreEnum.Type.xml and CourseAuthorization.instructor_email_enabled(enrollment.course_id) ) ) # Verification Attempts # Used to generate the "you must reverify for course x" banner verification_status, verification_msg = SoftwareSecurePhotoVerification.user_status(user) # Gets data for midcourse reverifications, if any are necessary or have failed statuses = ["approved", "denied", "pending", "must_reverify"] reverifications = reverification_info(statuses) show_refund_option_for = frozenset( enrollment.course_id for enrollment in course_enrollments if enrollment.refundable() ) block_courses = frozenset( enrollment.course_id for enrollment in course_enrollments if is_course_blocked( request, CourseRegistrationCode.objects.filter( course_id=enrollment.course_id, registrationcoderedemption__redeemed_by=request.user ), enrollment.course_id ) ) enrolled_courses_either_paid = frozenset( enrollment.course_id for enrollment in course_enrollments if enrollment.is_paid_course() ) # If there are *any* denied reverifications that have not been toggled off, # we'll display the banner denied_banner = any(item.display for item in reverifications["denied"]) # Populate the Order History for the side-bar. order_history_list = order_history(user, course_org_filter=course_org_filter, org_filter_out_set=org_filter_out_set) # get list of courses having pre-requisites yet to be completed courses_having_prerequisites = frozenset( enrollment.course_id for enrollment in course_enrollments if enrollment.course_overview.pre_requisite_courses ) courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites) if 'notlive' in request.GET: redirect_message = _("The course you are looking for does not start until {date}.").format( date=request.GET['notlive'] ) elif 'course_closed' in request.GET: redirect_message = _("The course you are looking for is closed for enrollment as of {date}.").format( date=request.GET['course_closed'] ) else: redirect_message = '' context = { 'enrollment_message': enrollment_message, 'redirect_message': redirect_message, 'course_enrollments': course_enrollments, 'course_optouts': course_optouts, 'message': message, 'staff_access': staff_access, 'errored_courses': errored_courses, 'show_courseware_links_for': show_courseware_links_for, 'all_course_modes': course_mode_info, 'cert_statuses': cert_statuses, 'credit_statuses': _credit_statuses(user, course_enrollments), 'show_email_settings_for': show_email_settings_for, 'reverifications': reverifications, 'verification_status': verification_status, 'verification_status_by_course': verify_status_by_course, 'verification_msg': verification_msg, 'show_refund_option_for': show_refund_option_for, 'block_courses': block_courses, 'denied_banner': denied_banner, 'billing_email': settings.PAYMENT_SUPPORT_EMAIL, 'user': user, 'logout_url': reverse(logout_user), 'platform_name': platform_name, 'enrolled_courses_either_paid': enrolled_courses_either_paid, 'provider_states': [], 'order_history_list': 
order_history_list, 'courses_requirements_not_met': courses_requirements_not_met, 'nav_hidden': True, 'course_programs': course_programs, 'disable_courseware_js': True, 'xseries_credentials': xseries_credentials, 'show_program_listing': ProgramsApiConfig.current().show_program_listing, } ecommerce_service = EcommerceService() if ecommerce_service.is_enabled(request.user): context.update({ 'use_ecommerce_payment_flow': True, 'ecommerce_payment_page': ecommerce_service.payment_page_url(), }) return render_to_response('dashboard.html', context) def _create_recent_enrollment_message(course_enrollments, course_modes): # pylint: disable=invalid-name """ Builds a recent course enrollment message. Constructs a new message template based on any recent course enrollments for the student. Args: course_enrollments (list[CourseEnrollment]): a list of course enrollments. course_modes (dict): Mapping of course ID's to course mode dictionaries. Returns: A string representing the HTML message output from the message template. None if there are no recently enrolled courses. """ recently_enrolled_courses = _get_recently_enrolled_courses(course_enrollments) if recently_enrolled_courses: enroll_messages = [ { "course_id": enrollment.course_overview.id, "course_name": enrollment.course_overview.display_name, "allow_donation": _allow_donation(course_modes, enrollment.course_overview.id, enrollment) } for enrollment in recently_enrolled_courses ] platform_name = microsite.get_value('platform_name', settings.PLATFORM_NAME) return render_to_string( 'enrollment/course_enrollment_message.html', {'course_enrollment_messages': enroll_messages, 'platform_name': platform_name} ) def _get_recently_enrolled_courses(course_enrollments): """ Given a list of enrollments, filter out all but recent enrollments. Args: course_enrollments (list[CourseEnrollment]): A list of course enrollments. Returns: list[CourseEnrollment]: A list of recent course enrollments. """ seconds = DashboardConfiguration.current().recent_enrollment_time_delta time_delta = (datetime.datetime.now(UTC) - datetime.timedelta(seconds=seconds)) return [ enrollment for enrollment in course_enrollments # If the enrollment has no created date, we are explicitly excluding the course # from the list of recent enrollments. if enrollment.is_active and enrollment.created > time_delta ] def _allow_donation(course_modes, course_id, enrollment): """Determines if the dashboard will request donations for the given course. Check if donations are configured for the platform, and if the current course is accepting donations. Args: course_modes (dict): Mapping of course ID's to course mode dictionaries. course_id (str): The unique identifier for the course. enrollment(CourseEnrollment): The enrollment object in which the user is enrolled Returns: True if the course is allowing donations. """ donations_enabled = DonationConfiguration.current().enabled return ( donations_enabled and enrollment.mode in course_modes[course_id] and course_modes[course_id][enrollment.mode].min_price == 0 ) def _update_email_opt_in(request, org): """Helper function used to hit the profile API if email opt-in is enabled.""" email_opt_in = request.POST.get('email_opt_in') if email_opt_in is not None: email_opt_in_boolean = email_opt_in == 'true' preferences_api.update_email_opt_in(request.user, org, email_opt_in_boolean) def _credit_statuses(user, course_enrollments): """ Retrieve the status for credit courses. A credit course is a course for which a user can purchased college credit. 
    The current flow is:

    1. User becomes eligible for credit (submits verifications, passes the course, etc.)
    2. User purchases credit from a particular credit provider.
    3. User requests credit from the provider, usually creating an account on the provider's site.
    4. The credit provider notifies us whether the user's request for credit
       has been accepted or rejected.

    The dashboard is responsible for communicating the user's state in this flow.

    Arguments:
        user (User): The currently logged-in user.
        course_enrollments (list[CourseEnrollment]): List of enrollments for the user.

    Returns:
        dict

    The returned dictionary has keys that are `CourseKey`s and values that
    are dictionaries with:

        * eligible (bool): True if the user is eligible for credit in this course.
        * deadline (datetime): The deadline for purchasing and requesting credit for this course.
        * purchased (bool): Whether the user has purchased credit for this course.
        * provider_name (string): The display name of the credit provider.
        * provider_status_url (string): A URL the user can visit to check on their credit request status.
        * request_status (string): Either "pending", "approved", or "rejected"
        * error (bool): If true, an unexpected error occurred when retrieving the credit status,
          so the user should contact the support team.

    Example:
        >>> _credit_statuses(user, course_enrollments)
        {
            CourseKey.from_string("edX/DemoX/Demo_Course"): {
                "course_key": "edX/DemoX/Demo_Course",
                "eligible": True,
                "deadline": 2015-11-23 00:00:00 UTC,
                "purchased": True,
                "provider_name": "Hogwarts",
                "provider_status_url": "http://example.com/status",
                "request_status": "pending",
                "error": False
            }
        }

    """
    from openedx.core.djangoapps.credit import api as credit_api

    # Feature flag off
    if not settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY"):
        return {}

    request_status_by_course = {
        request["course_key"]: request["status"]
        for request in credit_api.get_credit_requests_for_user(user.username)
    }

    credit_enrollments = {
        enrollment.course_id: enrollment
        for enrollment in course_enrollments
        if enrollment.mode == "credit"
    }

    # When a user purchases credit in a course, the user's enrollment
    # mode is set to "credit" and an enrollment attribute is set
    # with the ID of the credit provider.  We retrieve *all* such attributes
    # here to minimize the number of database queries.
    purchased_credit_providers = {
        attribute.enrollment.course_id: attribute.value
        for attribute in CourseEnrollmentAttribute.objects.filter(
            namespace="credit",
            name="provider_id",
            enrollment__in=credit_enrollments.values()
        ).select_related("enrollment")
    }

    provider_info_by_id = {
        provider["id"]: provider
        for provider in credit_api.get_credit_providers()
    }

    statuses = {}

    for eligibility in credit_api.get_eligibilities_for_user(user.username):
        course_key = CourseKey.from_string(unicode(eligibility["course_key"]))
        providers_names = get_credit_provider_display_names(course_key)
        status = {
            "course_key": unicode(course_key),
            "eligible": True,
            "deadline": eligibility["deadline"],
            "purchased": course_key in credit_enrollments,
            "provider_name": make_providers_strings(providers_names),
            "provider_status_url": None,
            "provider_id": None,
            "request_status": request_status_by_course.get(course_key),
            "error": False,
        }

        # If the user has purchased credit, then include information about the credit
        # provider from which the user purchased credit.
        # We retrieve the provider's ID from an "enrollment attribute" set on the user's
        # enrollment when the user's order for credit is fulfilled by the E-Commerce service.
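        # purchased_credit_providers (built above) maps each credit course key to its stored provider_id.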
if status["purchased"]: provider_id = purchased_credit_providers.get(course_key) if provider_id is None: status["error"] = True log.error( u"Could not find credit provider associated with credit enrollment " u"for user %s in course %s. The user will not be able to see his or her " u"credit request status on the student dashboard. This attribute should " u"have been set when the user purchased credit in the course.", user.id, course_key ) else: provider_info = provider_info_by_id.get(provider_id, {}) status["provider_name"] = provider_info.get("display_name") status["provider_status_url"] = provider_info.get("status_url") status["provider_id"] = provider_id statuses[course_key] = status return statuses @transaction.non_atomic_requests @require_POST @outer_atomic(read_committed=True) def change_enrollment(request, check_access=True): """ Modify the enrollment status for the logged-in user. The request parameter must be a POST request (other methods return 405) that specifies course_id and enrollment_action parameters. If course_id or enrollment_action is not specified, if course_id is not valid, if enrollment_action is something other than "enroll" or "unenroll", if enrollment_action is "enroll" and enrollment is closed for the course, or if enrollment_action is "unenroll" and the user is not enrolled in the course, a 400 error will be returned. If the user is not logged in, 403 will be returned; it is important that only this case return 403 so the front end can redirect the user to a registration or login page when this happens. This function should only be called from an AJAX request, so the error messages in the responses should never actually be user-visible. Args: request (`Request`): The Django request object Keyword Args: check_access (boolean): If True, we check that an accessible course actually exists for the given course_key before we enroll the student. The default is set to False to avoid breaking legacy code or code with non-standard flows (ex. beta tester invitations), but for any standard enrollment flow you probably want this to be True. Returns: Response """ # Get the user user = request.user # Ensure the user is authenticated if not user.is_authenticated(): return HttpResponseForbidden() # Ensure we received a course_id action = request.POST.get("enrollment_action") if 'course_id' not in request.POST: return HttpResponseBadRequest(_("Course id not specified")) try: course_id = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get("course_id")) except InvalidKeyError: log.warning( u"User %s tried to %s with invalid course id: %s", user.username, action, request.POST.get("course_id"), ) return HttpResponseBadRequest(_("Invalid course id")) if action == "enroll": # Make sure the course exists # We don't do this check on unenroll, or a bad course id can't be unenrolled from if not modulestore().has_course(course_id): log.warning( u"User %s tried to enroll in non-existent course %s", user.username, course_id ) return HttpResponseBadRequest(_("Course id is invalid")) # Record the user's email opt-in preference if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'): _update_email_opt_in(request, course_id.org) available_modes = CourseMode.modes_for_course_dict(course_id) # Check whether the user is blocked from enrolling in this course # This can occur if the user's IP is on a global blacklist # or if the user is enrolling in a country in which the course # is not available. 
redirect_url = embargo_api.redirect_if_blocked( course_id, user=user, ip_address=get_ip(request), url=request.path ) if redirect_url: return HttpResponse(redirect_url) # Check that auto enrollment is allowed for this course # (= the course is NOT behind a paywall) if CourseMode.can_auto_enroll(course_id): # Enroll the user using the default mode (audit) # We're assuming that users of the course enrollment table # will NOT try to look up the course enrollment model # by its slug. If they do, it's possible (based on the state of the database) # for no such model to exist, even though we've set the enrollment type # to "audit". try: enroll_mode = CourseMode.auto_enroll_mode(course_id, available_modes) if enroll_mode: CourseEnrollment.enroll(user, course_id, check_access=check_access, mode=enroll_mode) except Exception: # pylint: disable=broad-except return HttpResponseBadRequest(_("Could not enroll")) # If we have more than one course mode or professional ed is enabled, # then send the user to the choose your track page. # (In the case of no-id-professional/professional ed, this will redirect to a page that # funnels users directly into the verification / payment flow) if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes): return HttpResponse( reverse("course_modes_choose", kwargs={'course_id': unicode(course_id)}) ) # Otherwise, there is only one mode available (the default) return HttpResponse() elif action == "unenroll": enrollment = CourseEnrollment.get_enrollment(user, course_id) if not enrollment: return HttpResponseBadRequest(_("You are not enrolled in this course")) certificate_info = cert_info(user, enrollment.course_overview, enrollment.mode) if certificate_info.get('status') in DISABLE_UNENROLL_CERT_STATES: return HttpResponseBadRequest(_("Your certificate prevents you from unenrolling from this course")) CourseEnrollment.unenroll(user, course_id) return HttpResponse() else: return HttpResponseBadRequest(_("Enrollment action is invalid")) # Need different levels of logging @ensure_csrf_cookie def login_user(request, error=""): # pylint: disable=too-many-statements,unused-argument """AJAX request to log in the user.""" backend_name = None email = None password = None redirect_url = None response = None running_pipeline = None third_party_auth_requested = third_party_auth.is_enabled() and pipeline.running(request) third_party_auth_successful = False trumped_by_first_party_auth = bool(request.POST.get('email')) or bool(request.POST.get('password')) user = None platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME) if third_party_auth_requested and not trumped_by_first_party_auth: # The user has already authenticated via third-party auth and has not # asked to do first party auth by supplying a username or password. We # now want to put them through the same logging and cookie calculation # logic as with first-party auth. 
running_pipeline = pipeline.get(request) username = running_pipeline['kwargs'].get('username') backend_name = running_pipeline['backend'] third_party_uid = running_pipeline['kwargs']['uid'] requested_provider = provider.Registry.get_from_pipeline(running_pipeline) try: user = pipeline.get_authenticated_user(requested_provider, username, third_party_uid) third_party_auth_successful = True except User.DoesNotExist: AUDIT_LOG.warning( u"Login failed - user with username {username} has no social auth " "with backend_name {backend_name}".format( username=username, backend_name=backend_name) ) message = _( "You've successfully logged into your {provider_name} account, " "but this account isn't linked with an {platform_name} account yet." ).format( platform_name=platform_name, provider_name=requested_provider.name, ) message += "<br/><br/>" message += _( "Use your {platform_name} username and password to log into {platform_name} below, " "and then link your {platform_name} account with {provider_name} from your dashboard." ).format( platform_name=platform_name, provider_name=requested_provider.name, ) message += "<br/><br/>" message += _( "If you don't have an {platform_name} account yet, " "click <strong>Register</strong> at the top of the page." ).format( platform_name=platform_name ) return HttpResponse(message, content_type="text/plain", status=403) else: if 'email' not in request.POST or 'password' not in request.POST: return JsonResponse({ "success": False, # TODO: User error message "value": _('There was an error receiving your login information. Please email us.'), }) # TODO: this should be status code 400 email = request.POST['email'] password = request.POST['password'] try: user = User.objects.get(email=email) except User.DoesNotExist: if settings.FEATURES['SQUELCH_PII_IN_LOGS']: AUDIT_LOG.warning(u"Login failed - Unknown user email") else: AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email)) # check if the user has a linked shibboleth account, if so, redirect the user to shib-login # This behavior is pretty much like what gmail does for shibboleth. Try entering some @stanford.edu # address into the Gmail login. if settings.FEATURES.get('AUTH_USE_SHIB') and user: try: eamap = ExternalAuthMap.objects.get(user=user) if eamap.external_domain.startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX): return JsonResponse({ "success": False, "redirect": reverse('shib-login'), }) # TODO: this should be status code 301 # pylint: disable=fixme except ExternalAuthMap.DoesNotExist: # This is actually the common case, logging in user without external linked login AUDIT_LOG.info(u"User %s w/o external auth attempting login", user) # see if account has been locked out due to excessive login failures user_found_by_email_lookup = user if user_found_by_email_lookup and LoginFailures.is_feature_enabled(): if LoginFailures.is_user_locked_out(user_found_by_email_lookup): lockout_message = _('This account has been temporarily locked due ' 'to excessive login failures. Try again later.') return JsonResponse({ "success": False, "value": lockout_message, }) # TODO: this should be status code 429 # pylint: disable=fixme # see if the user must reset his/her password due to any policy settings if user_found_by_email_lookup and PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup): return JsonResponse({ "success": False, "value": _('Your password has expired due to password policy on this account. You must ' 'reset your password before you can log in again. 
Please click the ' '"Forgot Password" link on this page to reset your password before logging in again.'), }) # TODO: this should be status code 403 # pylint: disable=fixme # if the user doesn't exist, we want to set the username to an invalid # username so that authentication is guaranteed to fail and we can take # advantage of the ratelimited backend username = user.username if user else "" if not third_party_auth_successful: try: user = authenticate(username=username, password=password, request=request) # this occurs when there are too many attempts from the same IP address except RateLimitException: return JsonResponse({ "success": False, "value": _('Too many failed login attempts. Try again later.'), }) # TODO: this should be status code 429 # pylint: disable=fixme if user is None: # tick the failed login counters if the user exists in the database if user_found_by_email_lookup and LoginFailures.is_feature_enabled(): LoginFailures.increment_lockout_counter(user_found_by_email_lookup) # if we didn't find this username earlier, the account for this email # doesn't exist, and doesn't have a corresponding password if username != "": if settings.FEATURES['SQUELCH_PII_IN_LOGS']: loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>" AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id)) else: AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email)) return JsonResponse({ "success": False, "value": _('Email or password is incorrect.'), }) # TODO: this should be status code 400 # pylint: disable=fixme # successful login, clear failed login attempts counters, if applicable if LoginFailures.is_feature_enabled(): LoginFailures.clear_lockout_counter(user) # Track the user's sign in if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY: tracking_context = tracker.get_tracker().resolve_context() analytics.identify( user.id, { 'email': email, 'username': username }, { # Disable MailChimp because we don't want to update the user's email # and username in MailChimp on every page load. We only need to capture # this data on registration/activation. 'MailChimp': False } ) analytics.track( user.id, "edx.bi.user.account.authenticated", { 'category': "conversion", 'label': request.POST.get('course_id'), 'provider': None }, context={ 'ip': tracking_context.get('ip'), 'Google Analytics': { 'clientId': tracking_context.get('client_id') } } ) if user is not None and user.is_active: try: # We do not log here, because we have a handler registered # to perform logging on successful logins. login(request, user) if request.POST.get('remember') == 'true': request.session.set_expiry(604800) log.debug("Setting user session to never expire") else: request.session.set_expiry(0) except Exception as exc: # pylint: disable=broad-except AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?") log.critical("Login failed - Could not create session. Is memcached running?") log.exception(exc) raise redirect_url = None # The AJAX method calling should know the default destination upon success if third_party_auth_successful: redirect_url = pipeline.get_complete_url(backend_name) response = JsonResponse({ "success": True, "redirect_url": redirect_url, }) # Ensure that the external marketing site can # detect that the user is logged in. 
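# (set_logged_in_cookies attaches the shared logged-in cookies to the JSON response before it is returned.)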
return set_logged_in_cookies(request, response, user) if settings.FEATURES['SQUELCH_PII_IN_LOGS']: AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id)) else: AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username)) reactivation_email_for_user(user) not_activated_msg = _("This account has not been activated. We have sent another activation " "message. Please check your email for the activation instructions.") return JsonResponse({ "success": False, "value": not_activated_msg, }) # TODO: this should be status code 400 # pylint: disable=fixme @csrf_exempt @require_POST @social_utils.strategy("social:complete") def login_oauth_token(request, backend): """ Authenticate the client using an OAuth access token by using the token to retrieve information from a third party and matching that information to an existing user. """ warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning) backend = request.backend if isinstance(backend, social_oauth.BaseOAuth1) or isinstance(backend, social_oauth.BaseOAuth2): if "access_token" in request.POST: # Tell third party auth pipeline that this is an API call request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API user = None try: user = backend.do_auth(request.POST["access_token"]) except (HTTPError, AuthException): pass # do_auth can return a non-User object if it fails if user and isinstance(user, User): login(request, user) return JsonResponse(status=204) else: # Ensure user does not re-enter the pipeline request.social_strategy.clean_partial_pipeline() return JsonResponse({"error": "invalid_token"}, status=401) else: return JsonResponse({"error": "invalid_request"}, status=400) raise Http404 @ensure_csrf_cookie def logout_user(request): """ HTTP request to log out the user. Redirects to marketing page. Deletes both the CSRF and sessionid cookies so the marketing site can determine the logged in state of the user """ # We do not log here, because we have a handler registered # to perform logging on successful logouts. request.is_from_logout = True logout(request) if settings.FEATURES.get('AUTH_USE_CAS'): target = reverse('cas-logout') else: target = '/' response = redirect(target) delete_logged_in_cookies(response) return response @require_GET @login_required @ensure_csrf_cookie def manage_user_standing(request): """ Renders the view used to manage user standing. Also displays a table of user accounts that have been disabled and who disabled them. """ if not request.user.is_staff: raise Http404 all_disabled_accounts = UserStanding.objects.filter( account_status=UserStanding.ACCOUNT_DISABLED ) all_disabled_users = [standing.user for standing in all_disabled_accounts] headers = ['username', 'account_changed_by'] rows = [] for user in all_disabled_users: row = [user.username, user.standing.changed_by] rows.append(row) context = {'headers': headers, 'rows': rows} return render_to_response("manage_user_standing.html", context) @require_POST @login_required @ensure_csrf_cookie def disable_account_ajax(request): """ Ajax call to change user standing. 
Endpoint of the form in manage_user_standing.html """ if not request.user.is_staff: raise Http404 username = request.POST.get('username') context = {} if username is None or username.strip() == '': context['message'] = _('Please enter a username') return JsonResponse(context, status=400) account_action = request.POST.get('account_action') if account_action is None: context['message'] = _('Please choose an option') return JsonResponse(context, status=400) username = username.strip() try: user = User.objects.get(username=username) except User.DoesNotExist: context['message'] = _("User with username {} does not exist").format(username) return JsonResponse(context, status=400) else: user_account, _success = UserStanding.objects.get_or_create( user=user, defaults={'changed_by': request.user}, ) if account_action == 'disable': user_account.account_status = UserStanding.ACCOUNT_DISABLED context['message'] = _("Successfully disabled {}'s account").format(username) log.info(u"%s disabled %s's account", request.user, username) elif account_action == 'reenable': user_account.account_status = UserStanding.ACCOUNT_ENABLED context['message'] = _("Successfully reenabled {}'s account").format(username) log.info(u"%s reenabled %s's account", request.user, username) else: context['message'] = _("Unexpected account status") return JsonResponse(context, status=400) user_account.changed_by = request.user user_account.standing_last_changed_at = datetime.datetime.now(UTC) user_account.save() return JsonResponse(context) @login_required @ensure_csrf_cookie def change_setting(request): """JSON call to change a profile setting: Right now, location""" # TODO (vshnayder): location is no longer used u_prof = UserProfile.objects.get(user=request.user) # request.user.profile_cache if 'location' in request.POST: u_prof.location = request.POST['location'] u_prof.save() return JsonResponse({ "success": True, "location": u_prof.location, }) class AccountValidationError(Exception): def __init__(self, message, field): super(AccountValidationError, self).__init__(message) self.field = field @receiver(post_save, sender=User) def user_signup_handler(sender, **kwargs): # pylint: disable=unused-argument """ handler that saves the user Signup Source when the user is created """ if 'created' in kwargs and kwargs['created']: site = microsite.get_value('SITE_NAME') if site: user_signup_source = UserSignupSource(user=kwargs['instance'], site=site) user_signup_source.save() log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id)) def _do_create_account(form, custom_form=None): """ Given cleaned post variables, create the User and UserProfile objects, as well as the registration for this user. Returns a tuple (User, UserProfile, Registration). Note: this function is also used for creating test users. """ errors = {} errors.update(form.errors) if custom_form: errors.update(custom_form.errors) if errors: raise ValidationError(errors) user = User( username=form.cleaned_data["username"], email=form.cleaned_data["email"], is_active=False ) user.set_password(form.cleaned_data["password"]) registration = Registration() # TODO: Rearrange so that if part of the process fails, the whole process fails. # Right now, we can have e.g. 
no registration e-mail sent out and a zombie account try: with transaction.atomic(): user.save() if custom_form: custom_model = custom_form.save(commit=False) custom_model.user = user custom_model.save() except IntegrityError: # Figure out the cause of the integrity error if len(User.objects.filter(username=user.username)) > 0: raise AccountValidationError( _("An account with the Public Username '{username}' already exists.").format(username=user.username), field="username" ) elif len(User.objects.filter(email=user.email)) > 0: raise AccountValidationError( _("An account with the Email '{email}' already exists.").format(email=user.email), field="email" ) else: raise # add this account creation to password history # NOTE, this will be a NOP unless the feature has been turned on in configuration password_history_entry = PasswordHistory() password_history_entry.create(user) registration.register(user) profile_fields = [ "name", "level_of_education", "gender", "mailing_address", "city", "country", "goals", "year_of_birth" ] profile = UserProfile( user=user, **{key: form.cleaned_data.get(key) for key in profile_fields} ) extended_profile = form.cleaned_extended_profile if extended_profile: profile.meta = json.dumps(extended_profile) try: profile.save() except Exception: # pylint: disable=broad-except log.exception("UserProfile creation failed for user {id}.".format(id=user.id)) raise return (user, profile, registration) def create_account_with_params(request, params): """ Given a request and a dict of parameters (which may or may not have come from the request), create an account for the requesting user, including creating a comments service user object and sending an activation email. This also takes external/third-party auth into account, updates that as necessary, and authenticates the user for the request's session. Does not return anything. Raises AccountValidationError if an account with the username or email specified by params already exists, or ValidationError if any of the given parameters is invalid for any other reason. Issues with this code: * It is not transactional. If there is a failure part-way, an incomplete account will be created and left in the database. * Third-party auth passwords are not verified. There is a comment that they are unused, but it would be helpful to have a sanity check that they are sane. * It is over 300 lines long (!) and includes disprate functionality, from registration e-mails to all sorts of other things. It should be broken up into semantically meaningful functions. * The user-facing text is rather unfriendly (e.g. "Username must be a minimum of two characters long" rather than "Please use a username of at least two characters"). """ # Copy params so we can modify it; we can't just do dict(params) because if # params is request.POST, that results in a dict containing lists of values params = dict(params.items()) # allow for microsites to define their own set of required/optional/hidden fields extra_fields = microsite.get_value( 'REGISTRATION_EXTRA_FIELDS', getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {}) ) # Boolean of whether a 3rd party auth provider and credentials were provided in # the API so the newly created account can link with the 3rd party account. # # Note: this is orthogonal to the 3rd party authentication pipeline that occurs # when the account is created via the browser and redirect URLs. 
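# (An API caller signals this by passing 'provider' explicitly; a matching 'access_token' is then required further below.)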
should_link_with_social_auth = third_party_auth.is_enabled() and 'provider' in params if should_link_with_social_auth or (third_party_auth.is_enabled() and pipeline.running(request)): params["password"] = pipeline.make_random_password() # if doing signup for an external authorization, then get email, password, name from the eamap # don't use the ones from the form, since the user could have hacked those # unless originally we didn't get a valid email or name from the external auth # TODO: We do not check whether these values meet all necessary criteria, such as email length do_external_auth = 'ExternalAuthMap' in request.session if do_external_auth: eamap = request.session['ExternalAuthMap'] try: validate_email(eamap.external_email) params["email"] = eamap.external_email except ValidationError: pass if eamap.external_name.strip() != '': params["name"] = eamap.external_name params["password"] = eamap.internal_password log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"]) extended_profile_fields = microsite.get_value('extended_profile_fields', []) enforce_password_policy = ( settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and not do_external_auth ) # Can't have terms of service for certain SHIB users, like at Stanford registration_fields = getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {}) tos_required = ( registration_fields.get('terms_of_service') != 'hidden' or registration_fields.get('honor_code') != 'hidden' ) and ( not settings.FEATURES.get("AUTH_USE_SHIB") or not settings.FEATURES.get("SHIB_DISABLE_TOS") or not do_external_auth or not eamap.external_domain.startswith( external_auth.views.SHIBBOLETH_DOMAIN_PREFIX ) ) form = AccountCreationForm( data=params, extra_fields=extra_fields, extended_profile_fields=extended_profile_fields, enforce_username_neq_password=True, enforce_password_policy=enforce_password_policy, tos_required=tos_required, ) custom_form = get_registration_extension_form(data=params) # Perform operations within a transaction that are critical to account creation with transaction.atomic(): # first, create the account (user, profile, registration) = _do_create_account(form, custom_form) # next, link the account with social auth, if provided via the API. 
# (If the user is using the normal register page, the social auth pipeline does the linking, not this code) if should_link_with_social_auth: backend_name = params['provider'] request.social_strategy = social_utils.load_strategy(request) redirect_uri = reverse('social:complete', args=(backend_name, )) request.backend = social_utils.load_backend(request.social_strategy, backend_name, redirect_uri) social_access_token = params.get('access_token') if not social_access_token: raise ValidationError({ 'access_token': [ _("An access_token is required when passing value ({}) for provider.").format( params['provider'] ) ] }) request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API pipeline_user = None error_message = "" try: pipeline_user = request.backend.do_auth(social_access_token, user=user) except AuthAlreadyAssociated: error_message = _("The provided access_token is already associated with another user.") except (HTTPError, AuthException): error_message = _("The provided access_token is not valid.") if not pipeline_user or not isinstance(pipeline_user, User): # Ensure user does not re-enter the pipeline request.social_strategy.clean_partial_pipeline() raise ValidationError({'access_token': [error_message]}) # Perform operations that are non-critical parts of account creation preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language()) if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'): try: enable_notifications(user) except Exception: # pylint: disable=broad-except log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id)) dog_stats_api.increment("common.student.account_created") # If the user is registering via 3rd party auth, track which provider they use third_party_provider = None running_pipeline = None if third_party_auth.is_enabled() and pipeline.running(request): running_pipeline = pipeline.get(request) third_party_provider = provider.Registry.get_from_pipeline(running_pipeline) # Track the user's registration if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY: tracking_context = tracker.get_tracker().resolve_context() identity_args = [ user.id, # pylint: disable=no-member { 'email': user.email, 'username': user.username, 'name': profile.name, # Mailchimp requires the age & yearOfBirth to be integers, we send a sane integer default if falsey. 'age': profile.age or -1, 'yearOfBirth': profile.year_of_birth or datetime.datetime.now(UTC).year, 'education': profile.level_of_education_display, 'address': profile.mailing_address, 'gender': profile.gender_display, 'country': unicode(profile.country), } ] if hasattr(settings, 'MAILCHIMP_NEW_USER_LIST_ID'): identity_args.append({ "MailChimp": { "listId": settings.MAILCHIMP_NEW_USER_LIST_ID } }) analytics.identify(*identity_args) analytics.track( user.id, "edx.bi.user.account.registered", { 'category': 'conversion', 'label': params.get('course_id'), 'provider': third_party_provider.name if third_party_provider else None }, context={ 'ip': tracking_context.get('ip'), 'Google Analytics': { 'clientId': tracking_context.get('client_id') } } ) create_comments_service_user(user) # Don't send email if we are: # # 1. Doing load testing. # 2. Random user generation for other forms of testing. # 3. External auth bypassing activation. # 4. Have the platform configured to not require e-mail activation. # 5. 
Registering a new user using a trusted third party provider (with skip_email_verification=True) # # Note that this feature is only tested as a flag set one way or # the other for *new* systems. we need to be careful about # changing settings on a running system to make sure no users are # left in an inconsistent state (or doing a migration if they are). send_email = ( not settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None) and not settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING') and not (do_external_auth and settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH')) and not ( third_party_provider and third_party_provider.skip_email_verification and user.email == running_pipeline['kwargs'].get('details', {}).get('email') ) ) if send_email: context = { 'name': profile.name, 'key': registration.activation_key, } # composes activation email subject = render_to_string('emails/activation_email_subject.txt', context) # Email subject *must not* contain newlines subject = ''.join(subject.splitlines()) message = render_to_string('emails/activation_email.txt', context) from_address = microsite.get_value( 'email_from_address', settings.DEFAULT_FROM_EMAIL ) try: if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'): dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL'] message = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) + '-' * 80 + '\n\n' + message) mail.send_mail(subject, message, from_address, [dest_addr], fail_silently=False) else: user.email_user(subject, message, from_address) except Exception: # pylint: disable=broad-except log.error(u'Unable to send activation email to user from "%s"', from_address, exc_info=True) else: registration.activate() _enroll_user_in_pending_courses(user) # Enroll student in any pending courses # Immediately after a user creates an account, we log them in. They are only # logged in until they close the browser. They can't log in again until they click # the activation link from the email. new_user = authenticate(username=user.username, password=params['password']) login(request, new_user) request.session.set_expiry(0) # TODO: there is no error checking here to see that the user actually logged in successfully, # and is not yet an active user. if new_user is not None: AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username)) if do_external_auth: eamap.user = new_user eamap.dtsignup = datetime.datetime.now(UTC) eamap.save() AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username) AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap) if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'): log.info('bypassing activation email') new_user.is_active = True new_user.save() AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email)) return new_user def _enroll_user_in_pending_courses(student): """ Enroll student in any pending courses he/she may have. """ ceas = CourseEnrollmentAllowed.objects.filter(email=student.email) for cea in ceas: if cea.auto_enroll: enrollment = CourseEnrollment.enroll(student, cea.course_id) manual_enrollment_audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(student.email) if manual_enrollment_audit is not None: # get the enrolled by user and reason from the ManualEnrollmentAudit table. # then create a new ManualEnrollmentAudit table entry for the same email # different transition state. 
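# (ALLOWEDTOENROLL_TO_ENROLLED is the transition state recorded on the new audit entry below.)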
ManualEnrollmentAudit.create_manual_enrollment_audit( manual_enrollment_audit.enrolled_by, student.email, ALLOWEDTOENROLL_TO_ENROLLED, manual_enrollment_audit.reason, enrollment ) @csrf_exempt def create_account(request, post_override=None): """ JSON call to create new edX account. Used by form in signup_modal.html, which is included into navigation.html """ warnings.warn("Please use RegistrationView instead.", DeprecationWarning) try: user = create_account_with_params(request, post_override or request.POST) except AccountValidationError as exc: return JsonResponse({'success': False, 'value': exc.message, 'field': exc.field}, status=400) except ValidationError as exc: field, error_list = next(exc.message_dict.iteritems()) return JsonResponse( { "success": False, "field": field, "value": error_list[0], }, status=400 ) redirect_url = None # The AJAX method calling should know the default destination upon success # Resume the third-party-auth pipeline if necessary. if third_party_auth.is_enabled() and pipeline.running(request): running_pipeline = pipeline.get(request) redirect_url = pipeline.get_complete_url(running_pipeline['backend']) response = JsonResponse({ 'success': True, 'redirect_url': redirect_url, }) set_logged_in_cookies(request, response, user) return response def auto_auth(request): """ Create or configure a user account, then log in as that user. Enabled only when settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true. Accepts the following querystring parameters: * `username`, `email`, and `password` for the user account * `full_name` for the user profile (the user's full name; defaults to the username) * `staff`: Set to "true" to make the user global staff. * `course_id`: Enroll the student in the course with `course_id` * `roles`: Comma-separated list of roles to grant the student in the course with `course_id` * `no_login`: Define this to create the user but not login * `redirect`: Set to "true" will redirect to course if course_id is defined, otherwise it will redirect to dashboard If username, email, or password are not provided, use randomly generated credentials. """ # Generate a unique name to use if none provided unique_name = uuid.uuid4().hex[0:30] # Use the params from the request, otherwise use these defaults username = request.GET.get('username', unique_name) password = request.GET.get('password', unique_name) email = request.GET.get('email', unique_name + "@example.com") full_name = request.GET.get('full_name', username) is_staff = request.GET.get('staff', None) is_superuser = request.GET.get('superuser', None) course_id = request.GET.get('course_id', None) # mode has to be one of 'honor'/'professional'/'verified'/'audit'/'no-id-professional'/'credit' enrollment_mode = request.GET.get('enrollment_mode', 'honor') course_key = None if course_id: course_key = CourseLocator.from_string(course_id) role_names = [v.strip() for v in request.GET.get('roles', '').split(',') if v.strip()] redirect_when_done = request.GET.get('redirect', '').lower() == 'true' login_when_done = 'no_login' not in request.GET form = AccountCreationForm( data={ 'username': username, 'email': email, 'password': password, 'name': full_name, }, tos_required=False ) # Attempt to create the account. # If successful, this will return a tuple containing # the new user object. try: user, profile, reg = _do_create_account(form) except (AccountValidationError, ValidationError): # Attempt to retrieve the existing user. 
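# (auto_auth is effectively idempotent: an existing account is updated in place rather than treated as an error.)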
user = User.objects.get(username=username) user.email = email user.set_password(password) user.save() profile = UserProfile.objects.get(user=user) reg = Registration.objects.get(user=user) # Set the user's global staff bit if is_staff is not None: user.is_staff = (is_staff == "true") user.save() if is_superuser is not None: user.is_superuser = (is_superuser == "true") user.save() # Activate the user reg.activate() reg.save() # ensure parental consent threshold is met year = datetime.date.today().year age_limit = settings.PARENTAL_CONSENT_AGE_LIMIT profile.year_of_birth = (year - age_limit) - 1 profile.save() # Enroll the user in a course if course_key is not None: CourseEnrollment.enroll(user, course_key, mode=enrollment_mode) # Apply the roles for role_name in role_names: role = Role.objects.get(name=role_name, course_id=course_key) user.roles.add(role) # Log in as the user if login_when_done: user = authenticate(username=username, password=password) login(request, user) create_comments_service_user(user) # Provide the user with a valid CSRF token # then return a 200 response unless redirect is true if redirect_when_done: # Redirect to course info page if course_id is known if course_id: try: # redirect to course info page in LMS redirect_url = reverse( 'info', kwargs={'course_id': course_id} ) except NoReverseMatch: # redirect to course outline page in Studio redirect_url = reverse( 'course_handler', kwargs={'course_key_string': course_id} ) else: try: # redirect to dashboard for LMS redirect_url = reverse('dashboard') except NoReverseMatch: # redirect to home for Studio redirect_url = reverse('home') return redirect(redirect_url) elif request.META.get('HTTP_ACCEPT') == 'application/json': response = JsonResponse({ 'created_status': u"Logged in" if login_when_done else "Created", 'username': username, 'email': email, 'password': password, 'user_id': user.id, # pylint: disable=no-member 'anonymous_id': anonymous_id_for_user(user, None), }) else: success_msg = u"{} user {} ({}) with password {} and user_id {}".format( u"Logged in" if login_when_done else "Created", username, email, password, user.id # pylint: disable=no-member ) response = HttpResponse(success_msg) response.set_cookie('csrftoken', csrf(request)['csrf_token']) return response @ensure_csrf_cookie def activate_account(request, key): """When link in activation e-mail is clicked""" regs = Registration.objects.filter(activation_key=key) if len(regs) == 1: user_logged_in = request.user.is_authenticated() already_active = True if not regs[0].user.is_active: regs[0].activate() already_active = False # Enroll student in any pending courses he/she may have if auto_enroll flag is set _enroll_user_in_pending_courses(regs[0].user) resp = render_to_response( "registration/activation_complete.html", { 'user_logged_in': user_logged_in, 'already_active': already_active } ) return resp if len(regs) == 0: return render_to_response( "registration/activation_invalid.html", {'csrf': csrf(request)['csrf_token']} ) return HttpResponseServerError(_("Unknown error. Please e-mail us to let us know how it happened.")) @csrf_exempt @require_POST def password_reset(request): """ Attempts to send a password reset e-mail. 
""" # Add some rate limiting here by re-using the RateLimitMixin as a helper class limiter = BadRequestRateLimiter() if limiter.is_rate_limit_exceeded(request): AUDIT_LOG.warning("Rate limit exceeded in password_reset") return HttpResponseForbidden() form = PasswordResetFormNoActive(request.POST) if form.is_valid(): form.save(use_https=request.is_secure(), from_email=microsite.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL), request=request, domain_override=request.get_host()) # When password change is complete, a "edx.user.settings.changed" event will be emitted. # But because changing the password is multi-step, we also emit an event here so that we can # track where the request was initiated. tracker.emit( SETTING_CHANGE_INITIATED, { "setting": "password", "old": None, "new": None, "user_id": request.user.id, } ) else: # bad user? tick the rate limiter counter AUDIT_LOG.info("Bad password_reset user passed in.") limiter.tick_bad_request_counter(request) return JsonResponse({ 'success': True, 'value': render_to_string('registration/password_reset_done.html', {}), }) def password_reset_confirm_wrapper( request, uidb36=None, token=None, ): """ A wrapper around django.contrib.auth.views.password_reset_confirm. Needed because we want to set the user as active at this step. """ # cribbed from django.contrib.auth.views.password_reset_confirm try: uid_int = base36_to_int(uidb36) user = User.objects.get(id=uid_int) user.is_active = True user.save() except (ValueError, User.DoesNotExist): pass # tie in password strength enforcement as an optional level of # security protection err_msg = None if request.method == 'POST': password = request.POST['new_password1'] if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False): try: validate_password_length(password) validate_password_complexity(password) validate_password_dictionary(password) except ValidationError, err: err_msg = _('Password: ') + '; '.join(err.messages) # also, check the password reuse policy if not PasswordHistory.is_allowable_password_reuse(user, password): if user.is_staff: num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE'] else: num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE'] # Because of how ngettext is, splitting the following into shorter lines would be ugly. # pylint: disable=line-too-long err_msg = ungettext( "You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.", "You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.", num_distinct ).format(num=num_distinct) # also, check to see if passwords are getting reset too frequent if PasswordHistory.is_password_reset_too_soon(user): num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS'] # Because of how ngettext is, splitting the following into shorter lines would be ugly. # pylint: disable=line-too-long err_msg = ungettext( "You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.", "You are resetting passwords too frequently. 
Due to security policies, {num} days must elapse between password resets.", num_days ).format(num=num_days) if err_msg: # We have an password reset attempt which violates some security policy, use the # existing Django template to communicate this back to the user context = { 'validlink': True, 'form': None, 'title': _('Password reset unsuccessful'), 'err_msg': err_msg, 'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME), } return TemplateResponse(request, 'registration/password_reset_confirm.html', context) else: # we also want to pass settings.PLATFORM_NAME in as extra_context extra_context = {"platform_name": microsite.get_value('platform_name', settings.PLATFORM_NAME)} # Support old password reset URLs that used base36 encoded user IDs. # https://github.com/django/django/commit/1184d077893ff1bc947e45b00a4d565f3df81776#diff-c571286052438b2e3190f8db8331a92bR231 try: uidb64 = force_text(urlsafe_base64_encode(force_bytes(base36_to_int(uidb36)))) except ValueError: uidb64 = '1' # dummy invalid ID (incorrect padding for base64) if request.method == 'POST': # remember what the old password hash is before we call down old_password_hash = user.password result = password_reset_confirm( request, uidb64=uidb64, token=token, extra_context=extra_context ) # get the updated user updated_user = User.objects.get(id=uid_int) # did the password hash change, if so record it in the PasswordHistory if updated_user.password != old_password_hash: entry = PasswordHistory() entry.create(updated_user) return result else: return password_reset_confirm( request, uidb64=uidb64, token=token, extra_context=extra_context ) def reactivation_email_for_user(user): try: reg = Registration.objects.get(user=user) except Registration.DoesNotExist: return JsonResponse({ "success": False, "error": _('No inactive user with this e-mail exists'), }) # TODO: this should be status code 400 # pylint: disable=fixme context = { 'name': user.profile.name, 'key': reg.activation_key, } subject = render_to_string('emails/activation_email_subject.txt', context) subject = ''.join(subject.splitlines()) message = render_to_string('emails/activation_email.txt', context) try: user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL) except Exception: # pylint: disable=broad-except log.error(u'Unable to send reactivation email from "%s"', settings.DEFAULT_FROM_EMAIL, exc_info=True) return JsonResponse({ "success": False, "error": _('Unable to send reactivation email') }) # TODO: this should be status code 500 # pylint: disable=fixme return JsonResponse({"success": True}) def validate_new_email(user, new_email): """ Given a new email for a user, does some basic verification of the new address If any issues are encountered with verification a ValueError will be thrown. """ try: validate_email(new_email) except ValidationError: raise ValueError(_('Valid e-mail address required.')) if new_email == user.email: raise ValueError(_('Old email is the same as the new email.')) if User.objects.filter(email=new_email).count() != 0: raise ValueError(_('An account with this e-mail already exists.')) def do_email_change_request(user, new_email, activation_key=None): """ Given a new email for a user, does some basic verification of the new address and sends an activation message to the new address. If any issues are encountered with verification or sending the message, a ValueError will be thrown. 
""" pec_list = PendingEmailChange.objects.filter(user=user) if len(pec_list) == 0: pec = PendingEmailChange() pec.user = user else: pec = pec_list[0] # if activation_key is not passing as an argument, generate a random key if not activation_key: activation_key = uuid.uuid4().hex pec.new_email = new_email pec.activation_key = activation_key pec.save() context = { 'key': pec.activation_key, 'old_email': user.email, 'new_email': pec.new_email } subject = render_to_string('emails/email_change_subject.txt', context) subject = ''.join(subject.splitlines()) message = render_to_string('emails/email_change.txt', context) from_address = microsite.get_value( 'email_from_address', settings.DEFAULT_FROM_EMAIL ) try: mail.send_mail(subject, message, from_address, [pec.new_email]) except Exception: # pylint: disable=broad-except log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True) raise ValueError(_('Unable to send email activation link. Please try again later.')) # When the email address change is complete, a "edx.user.settings.changed" event will be emitted. # But because changing the email address is multi-step, we also emit an event here so that we can # track where the request was initiated. tracker.emit( SETTING_CHANGE_INITIATED, { "setting": "email", "old": context['old_email'], "new": context['new_email'], "user_id": user.id, } ) @ensure_csrf_cookie def confirm_email_change(request, key): # pylint: disable=unused-argument """ User requested a new e-mail. This is called when the activation link is clicked. We confirm with the old e-mail, and update """ with transaction.atomic(): try: pec = PendingEmailChange.objects.get(activation_key=key) except PendingEmailChange.DoesNotExist: response = render_to_response("invalid_email_key.html", {}) transaction.set_rollback(True) return response user = pec.user address_context = { 'old_email': user.email, 'new_email': pec.new_email } if len(User.objects.filter(email=pec.new_email)) != 0: response = render_to_response("email_exists.html", {}) transaction.set_rollback(True) return response subject = render_to_string('emails/email_change_subject.txt', address_context) subject = ''.join(subject.splitlines()) message = render_to_string('emails/confirm_email_change.txt', address_context) u_prof = UserProfile.objects.get(user=user) meta = u_prof.get_meta() if 'old_emails' not in meta: meta['old_emails'] = [] meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()]) u_prof.set_meta(meta) u_prof.save() # Send it to the old email... try: user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL) except Exception: # pylint: disable=broad-except log.warning('Unable to send confirmation email to old address', exc_info=True) response = render_to_response("email_change_failed.html", {'email': user.email}) transaction.set_rollback(True) return response user.email = pec.new_email user.save() pec.delete() # And send it to the new email... 
try: user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL) except Exception: # pylint: disable=broad-except log.warning('Unable to send confirmation email to new address', exc_info=True) response = render_to_response("email_change_failed.html", {'email': pec.new_email}) transaction.set_rollback(True) return response response = render_to_response("email_change_successful.html", address_context) return response @require_POST @login_required @ensure_csrf_cookie def change_email_settings(request): """Modify logged-in user's setting for receiving emails from a course.""" user = request.user course_id = request.POST.get("course_id") course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id) receive_emails = request.POST.get("receive_emails") if receive_emails: optout_object = Optout.objects.filter(user=user, course_id=course_key) if optout_object: optout_object.delete() log.info( u"User %s (%s) opted in to receive emails from course %s", user.username, user.email, course_id, ) track.views.server_track( request, "change-email-settings", {"receive_emails": "yes", "course": course_id}, page='dashboard', ) else: Optout.objects.get_or_create(user=user, course_id=course_key) log.info( u"User %s (%s) opted out of receiving emails from course %s", user.username, user.email, course_id, ) track.views.server_track( request, "change-email-settings", {"receive_emails": "no", "course": course_id}, page='dashboard', ) return JsonResponse({"success": True}) def _get_course_programs(user, user_enrolled_courses): # pylint: disable=invalid-name """Build a dictionary of program data required for display on the student dashboard. Given a user and an iterable of course keys, find all programs relevant to the user and return them in a dictionary keyed by course key. Arguments: user (User): The user to authenticate as when requesting programs. user_enrolled_courses (list): List of course keys representing the courses in which the given user has active enrollments. Returns: dict, containing programs keyed by course. """ course_programs = get_programs_for_dashboard(user, user_enrolled_courses) programs_data = {} for course_key, programs in course_programs.viewitems(): for program in programs: if program.get('status') == 'active' and program.get('category') == 'xseries': try: programs_for_course = programs_data.setdefault(course_key, {}) programs_for_course.setdefault('course_program_list', []).append({ 'course_count': len(program['course_codes']), 'display_name': program['name'], 'program_id': program['id'], 'program_marketing_url': urljoin( settings.MKTG_URLS.get('ROOT'), 'xseries' + '/{}' ).format(program['marketing_slug']) }) programs_for_course['display_category'] = program.get('display_category') programs_for_course['category'] = program.get('category') except KeyError: log.warning('Program structure is invalid, skipping display: %r', program) return programs_data def _get_xseries_credentials(user): """Return program credentials data required for display on the learner dashboard. Given a user, find all programs for which certificates have been earned and return list of dictionaries of required program data. Arguments: user (User): user object for getting programs credentials. Returns: list of dict, containing data corresponding to the programs for which the user has been awarded a credential. 
""" programs_credentials = get_user_program_credentials(user) credentials_data = [] for program in programs_credentials: if program.get('category') == 'xseries': try: program_data = { 'display_name': program['name'], 'subtitle': program['subtitle'], 'credential_url': program['credential_url'], } credentials_data.append(program_data) except KeyError: log.warning('Program structure is invalid: %r', program) return credentials_data
agpl-3.0
-8,384,127,725,916,845,000
40.182839
150
0.646169
false
marcosgabbardo/nbaPredictor
lib/nba_csvgenerator.py
1
4987
import pymysql.cursors import csv # CSV file generator to upload in amazon machine learning with some statistics to use # in multiclass, binary or regression analisys def csvGenerator(league, now, cvs_file): connection = pymysql.connect(host='192.168.99.100', user='root', password='stalinmaki', db='sportbet', charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) print "Generating Statistic CSV file..." c_game_stat = connection.cursor() sql = "SELECT game.id,home.name homename,away.name awayname, " sql = sql + " (home_stat.orb_avg1 - away_stat.orb_avg1) a_orb_avg1, " sql = sql + " (home_stat.ortg_avg1 - away_stat.ortg_avg1) a_ortg_avg1, " sql = sql + " (home_stat.orb_avg3 - away_stat.orb_avg3) a_orb_avg3, " sql = sql + " (home_stat.ortg_avg3 - away_stat.ortg_avg3) a_ortg_avg3, " sql = sql + " (home_stat.orb_avg5 - away_stat.orb_avg5) a_orb_avg5, " sql = sql + " (home_stat.ortg_avg5 - away_stat.ortg_avg5) a_ortg_avg5, " sql = sql + " case when home_point > away_point then home.name " sql = sql + " when home_point < away_point then away.name end decisor " sql = sql + " FROM nba_game game, nba_team home, nba_team away, nba_team_history home_stat, nba_team_history away_stat " sql = sql + " WHERE home.name = game.home_name AND away.name = game.away_name AND home_stat.team_name = home.name " sql = sql + " AND away_stat.team_name = away_name AND game.date = home_stat.date AND game.date = away_stat.date " sql = sql + " AND home.type = away.type and home.type = %s AND game.date < %s ORDER BY game.date " c_game_stat.execute(sql, (league, now)) rows = c_game_stat.fetchall() fp_url = 'data/'+ cvs_file fp = open(fp_url, 'w') myFile = csv.writer(fp) myFile.writerow(['ID','HOME','AWAY','ORB1','RTG1', 'ORB3','RTG3',"ORB5",'RTG5','DECISOR']) for x in rows: v_id = x["id"] v_homename = x["homename"] v_orb1 = x["a_orb_avg1"] v_orb3 = x["a_orb_avg3"] v_orb5 = x["a_orb_avg5"] v_rtg1 = x["a_ortg_avg1"] v_rtg3 = x["a_ortg_avg3"] v_rtg5 = x["a_ortg_avg5"] v_awayname = x["awayname"] v_decisor = x["decisor"] myFile.writerow([v_id,v_homename,v_awayname,v_orb1,v_rtg1, v_orb3, v_rtg3,v_orb5,v_rtg5,v_decisor]) fp.close() c_game_stat.close() connection.close() def predictCsvGenerator(league, now, cvs_file): connection = pymysql.connect(host='192.168.99.100', user='root', password='stalinmaki', db='sportbet', charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) print "Generating Statistic CSV file..." 
c_game_stat = connection.cursor() sql = "SELECT game.id,home.name homename,away.name awayname, " sql = sql + " (home_stat.orb_avg1 - away_stat.orb_avg1) a_orb_avg1, " sql = sql + " (home_stat.ortg_avg1 - away_stat.ortg_avg1) a_ortg_avg1, " sql = sql + " (home_stat.orb_avg3 - away_stat.orb_avg3) a_orb_avg3, " sql = sql + " (home_stat.ortg_avg3 - away_stat.ortg_avg3) a_ortg_avg3, " sql = sql + " (home_stat.orb_avg5 - away_stat.orb_avg5) a_orb_avg5, " sql = sql + " (home_stat.ortg_avg5 - away_stat.ortg_avg5) a_ortg_avg5, " sql = sql + " case when home_point > away_point then home.name " sql = sql + " when home_point < away_point then away.name end decisor " sql = sql + " FROM nba_game game, nba_team home, nba_team away, nba_team_history home_stat, nba_team_history away_stat " sql = sql + " WHERE home.name = game.home_name AND away.name = game.away_name AND home_stat.team_name = home.name " sql = sql + " AND away_stat.team_name = away_name AND game.date = home_stat.date AND game.date = away_stat.date " sql = sql + " AND home.type = away.type and home.type = %s AND game.date = %s ORDER BY game.date " c_game_stat.execute(sql, (league, now)) rows = c_game_stat.fetchall() fp_url = 'data/'+ cvs_file fp = open(fp_url, 'w') myFile = csv.writer(fp) myFile.writerow(['ID','HOME','AWAY','ORB1','RTG1', 'ORB3','RTG3',"ORB5",'RTG5','DECISOR']) for x in rows: v_id = x["id"] v_homename = x["homename"] v_orb1 = x["a_orb_avg1"] v_orb3 = x["a_orb_avg3"] v_orb5 = x["a_orb_avg5"] v_rtg1 = x["a_ortg_avg1"] v_rtg3 = x["a_ortg_avg3"] v_rtg5 = x["a_ortg_avg5"] v_awayname = x["awayname"] v_decisor = x["decisor"] myFile.writerow([v_id,v_homename,v_awayname,v_orb1,v_rtg1, v_orb3, v_rtg3,v_orb5,v_rtg5,v_decisor]) fp.close() c_game_stat.close() connection.close()
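
# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Both generators share the same (league, now, cvs_file) signature and write into the
# local data/ directory; the argument values below are hypothetical and a reachable
# MySQL instance holding the nba_* tables is assumed.
#
# if __name__ == '__main__':
#     csvGenerator('nba', '2016-04-01', 'nba_train.csv')           # games strictly before the date
#     predictCsvGenerator('nba', '2016-04-01', 'nba_predict.csv')  # games played on the date itself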
mit
-3,788,412,098,069,962,000
38.587302
124
0.569882
false
jacenkow/inspire-next
tests/integration/disambiguation/test_receivers.py
1
6418
# -*- coding: utf-8 -*- # # This file is part of INSPIRE. # Copyright (C) 2016 CERN. # # INSPIRE is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # INSPIRE is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with INSPIRE. If not, see <http://www.gnu.org/licenses/>. # # In applying this licence, CERN does not waive the privileges and immunities # granted to it by virtue of its status as an Intergovernmental Organization # or submit itself to any jurisdiction. from __future__ import ( absolute_import, division, print_function, ) from copy import deepcopy from uuid import uuid4 from sqlalchemy import desc from invenio_pidstore.models import PersistentIdentifier from invenio_records.api import Record from inspirehep.modules.disambiguation.models import DisambiguationRecord from inspirehep.modules.disambiguation.receivers import ( append_new_record_to_queue, append_updated_record_to_queue, ) class _IdDict(dict): def __init__(self, *args, **kwargs): super(_IdDict, self).__init__(*args, **kwargs) self._id = uuid4() @property def id(self): return self._id def test_append_new_record_to_queue_method(small_app): """Test the receiver responsible for queuing new HEP records.""" sample_hep_record = _IdDict({ '$schema': 'http://localhost:5000/schemas/records/hep.json', 'authors': [{ 'affiliations': [{'value': 'Copenhagen U.'}], 'curated_relation': False, 'full_name': 'Glashow, S.L.', 'signature_block': 'GLASs', 'uuid': '5ece3c81-0a50-481d-8bee-5f78576e9504' }], 'collections': [ {'primary': 'CORE'}, {'primary': 'HEP'} ], 'control_number': '4328', 'self': {'$ref': 'http://localhost:5000/api/literature/4328'}, 'titles': [{'title': 'Partial Symmetries of Weak Interactions'}] }) append_new_record_to_queue(sample_hep_record) assert str(sample_hep_record.id) == \ DisambiguationRecord.query.order_by(desc("id")).first().record_id def test_append_new_record_to_queue_method_not_hep_record(small_app): """Test if the receiver will skip a new publication, not HEP.""" sample_author_record = _IdDict({ '$schema': 'http://localhost:5000/schemas/records/authors.json', 'collections': [{'primary': 'HEPNAMES'}], 'control_number': '314159265', 'name': {'value': 'Glashow, S.L.'}, 'positions': [{'institution': {'name': 'Copenhagen U.'}}], 'self': {'$ref': 'http://localhost:5000/api/authors/314159265'}}) append_new_record_to_queue(sample_author_record) assert str(sample_author_record.id) != \ DisambiguationRecord.query.order_by(desc("id")).first().record_id def test_append_updated_record_to_queue(small_app): """Test the receiver responsible for queuing updated HEP records.""" pid = PersistentIdentifier.get("literature", 4328) publication_id = str(pid.object_uuid) record = Record.get_record(publication_id) record_to_update = deepcopy(record) record_to_update['authors'][0]['full_name'] = "John Smith" append_updated_record_to_queue(None, record_to_update, record_to_update, "records-hep", "hep") assert str(record_to_update.id) == \ DisambiguationRecord.query.order_by(desc("id")).first().record_id def test_append_updated_record_to_queue_new_record(small_app): """Test if the receiver will return None, 
since the record will not be found in the Elasticsearch instance. This record will be caught by 'append_new_record_to_queue' signal. """ sample_hep_record = _IdDict({ '$schema': 'http://localhost:5000/schemas/records/hep.json', 'authors': [{ 'affiliations': [{'value': 'Copenhagen U.'}], 'curated_relation': False, 'full_name': 'Glashow, S.L.', 'signature_block': 'GLASs', 'uuid': '5ece3c81-0a50-481d-8bee-5f78576e9504' }], 'collections': [ {'primary': 'CORE'}, {'primary': 'HEP'} ], 'control_number': '4328', 'self': {'$ref': 'http://localhost:5000/api/literature/4328'}, 'titles': [{'title': 'Partial Symmetries of Weak Interactions'}] }) result = append_updated_record_to_queue(None, sample_hep_record, sample_hep_record, "records-hep", "hep") assert result is None assert str(sample_hep_record.id) != \ DisambiguationRecord.query.order_by(desc("id")).first().record_id def test_append_updated_record_to_queue_not_hep_record(small_app): """Test if the receiver will skip an updated publication, not HEP.""" sample_author_record = _IdDict({ '$schema': 'http://localhost:5000/schemas/records/authors.json', 'collections': [{'primary': 'HEPNAMES'}], 'control_number': '314159265', 'name': {'value': 'Glashow, S.L.'}, 'positions': [{'institution': {'name': 'Copenhagen U.'}}], 'self': {'$ref': 'http://localhost:5000/api/authors/314159265'}}) append_updated_record_to_queue(None, sample_author_record, sample_author_record, "records-authors", "authors") assert str(sample_author_record.id) != \ DisambiguationRecord.query.order_by(desc("id")).first().record_id def test_append_updated_record_to_queue_same_data(small_app): """Check if for the same record, the receiver will skip the publication.""" pid = PersistentIdentifier.get("literature", 11883) publication_id = str(pid.object_uuid) record = Record.get_record(publication_id) append_updated_record_to_queue(None, record, record, "records-hep", "hep") assert str(record.id) != \ DisambiguationRecord.query.order_by(desc("id")).first().record_id
gpl-2.0
-1,031,608,692,336,628,200
36.098266
79
0.629791
false
llambiel/cloud-canary
api-canary.py
1
4241
#!/usr/bin/python # -*- coding: utf-8 -*- # Loic Lambiel © # License MIT import sys import argparse import logging import logging.handlers import time import socket try: from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver except ImportError: print ("It look like libcloud module isn't installed. Please install it using pip install apache-libcloud") sys.exit(1) try: import bernhard except ImportError: print ("It look like riemann client (bernard) isn't installed. Please install it using pip install bernhard") sys.exit(1) try: from configparser import ConfigParser except ImportError: # python 2 from ConfigParser import ConfigParser logfile = "/var/log/api-canary.log" logging.basicConfig(format='%(asctime)s %(pathname)s %(levelname)s:%(message)s', level=logging.DEBUG, filename=logfile) logging.getLogger().addHandler(logging.StreamHandler()) API_HOST = get_driver(Provider.EXOSCALE).host def main(): parser = argparse.ArgumentParser(description='This script perform a list_size API query on exoscale public cloud. If any error occur during the process, an alarm is being sent to riemann monitoring. time metric is also sent to riemann') parser.add_argument('-version', action='version', version='%(prog)s 1.0, Loic Lambiel, exoscale') parser.add_argument('-acskey', help='Cloudstack API user key', required=True, type=str, dest='acskey') parser.add_argument('-acssecret', help='Cloudstack API user secret', required=True, type=str, dest='acssecret') parser.add_argument('-host', help='Cloudstack enpoint', required=False, type=str, default=API_HOST, dest='host') args = vars(parser.parse_args()) return args def list_size(args): key = args['acskey'] secret = args['acssecret'] host = args['host'] cls = get_driver(Provider.EXOSCALE) driver = cls(key, secret, host=host) logging.info('Performing query') size = driver.list_sizes() micro = False for item in size: if item.name == 'Micro': micro = True if micro is False: raise Exception("API call did not returned Micro instance type. This means the API isn't working correctly") # main if __name__ == "__main__": args = main() conf = ConfigParser() conf.read(("/etc/bernhard.conf",)) client = bernhard.SSLClient(host=conf.get('default', 'riemann_server'), port=int(conf.get('default', 'riemann_port')), keyfile=conf.get('default', 'tls_cert_key'), certfile=conf.get('default', 'tls_cert'), ca_certs=conf.get('default', 'tls_ca_cert')) start_time = time.time() try: list_size(args) exectime = time.time() - start_time host = socket.gethostname() client.send({'host': host, 'service': "api_canary.exectime", 'state': 'ok', 'tags': ['duration'], 'ttl': 600, 'metric': exectime}) client.send({'host': host, 'service': "api_canary.check", 'state': 'ok', 'tags': ['api_canary.py', 'duration'], 'ttl': 600, 'metric': 0}) logging.info('Script completed successfully') except Exception as e: logging.exception("An exception occured. Exception is: %s", e) host = socket.gethostname() exectime = 61 txt = 'An exception occurred on api_canary.py: %s. See logfile %s for more info' % (e, logfile) client.send({'host': host, 'service': "api_canary.check", 'description': txt, 'state': 'critical', 'tags': ['api_canary.py', 'duration'], 'ttl': 600, 'metric': 1}) client.send({'host': host, 'service': "api_canary.exectime", 'state': 'ok', 'tags': ['duration'], 'ttl': 600, 'metric': exectime}) raise
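
# --- Illustrative invocation sketch (added for clarity; not part of the original script). ---
# The script is meant to be run from cron or a shell with the API credentials passed as
# arguments; the key/secret/host values below are placeholders.
#
#   python api-canary.py -acskey EXOxxxxxxxx -acssecret yyyyyyyy
#   python api-canary.py -acskey EXOxxxxxxxx -acssecret yyyyyyyy -host api.exoscale.ch
#
# A /etc/bernhard.conf providing riemann_server, riemann_port and the TLS certificate
# paths must exist, otherwise the riemann client cannot be built.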
mit
7,910,202,042,723,865,000
34.041322
240
0.579245
false
alexisbellido/django-vodkamartini-qa
vodkamartiniqa/views/answers.py
1
1248
from django.http import HttpResponse from vodkamartiniqa.views.helpers import get_answers from vodkamartiniqa.views.helpers import get_questions import json #from django.core.serializers.json import DjangoJSONEncoder # TODO get answers via ajax with start and end def get_answers_ajax(request, question_id, start=0, end=8): objects = get_answers(question_id, start, end) answers = [] for object in objects: submit_date = object.submit_date.strftime("%B %e, %Y") answers.append({ 'id': object.id, 'answer': object.answer, 'user': object.user.username, 'user_picture': object.user.drupaluser.picture, 'votes_up': object.votes_up, 'votes_down': object.votes_down, 'posted_by_expert': object.posted_by_expert, 'submit_date': submit_date, }) return HttpResponse(json.dumps(answers), mimetype='application/json') # use DjangoJSONEncoder to pass datetime objects to json #return HttpResponse(json.dumps(answers, cls=DjangoJSONEncoder), mimetype='application/json')
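
# --- Illustrative wiring sketch (added for clarity; not part of the original module). ---
# A hypothetical URLconf entry for the AJAX endpoint above could look like:
#
#   url(r'^questions/(?P<question_id>\d+)/answers/(?P<start>\d+)/(?P<end>\d+)/$',
#       'vodkamartiniqa.views.answers.get_answers_ajax'),
#
# The view returns a JSON list of answer dicts (id, answer, user, votes_up, ...) that a
# frontend script can append to the page, using start/end to paginate.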
bsd-3-clause
1,810,627,598,814,073,600
47
97
0.596955
false
coagulant/cleanweb
test_cleanweb.py
1
8671
# coding: utf-8 from __future__ import unicode_literals from unittest import TestCase from httpretty import HTTPretty, PY3, parse_qs import pytest from cleanweb import Cleanweb, CleanwebError class HttprettyCase(TestCase): def setUp(self): HTTPretty.reset() HTTPretty.enable() def tearDown(self): HTTPretty.disable() def assertBodyQueryString(self, **kwargs): """ Hakish, but works %(""" if PY3: qs = parse_qs(HTTPretty.last_request.body.decode('utf-8')) else: qs = dict((key, [values[0].decode('utf-8')]) for key, values in parse_qs(HTTPretty.last_request.body).items()) assert kwargs == qs class Api(HttprettyCase): def test_raises_exception_when_instantiated_with_no_key(self): with pytest.raises(CleanwebError) as excinfo: Cleanweb() assert str(excinfo.value) == "Cleanweb needs API key to operate. Get it here: http://api.yandex.ru/cleanweb/form.xml" def test_xml_error_is_handled(self): error_repsonse = """ <!DOCTYPE get-captcha-result PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <error key="key-not-registered"><message>Provided API key not registered</message></error>""" HTTPretty.register_uri(HTTPretty.GET, "http://cleanweb-api.yandex.ru/1.0/get-captcha", body=error_repsonse, status=403) with pytest.raises(CleanwebError) as excinfo: Cleanweb(key='xxx').get_captcha() assert str(excinfo.value) == 'Provided API key not registered (key-not-registered)' class CheckSpam(HttprettyCase): def setUp(self): super(CheckSpam, self).setUp() self.ham_response = """ <!DOCTYPE check-spam-result PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <check-spam-result> <id>123456789abcd</id> <text spam-flag="no" /> <links></links> </check-spam-result>""" self.spam_response = """ <!DOCTYPE check-spam-result PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <check-spam-result> <id>123456789efgh</id> <text spam-flag="yes" /> <links> <link href="http://cnn.com" spam-flag="yes" /> <link href="http://yandex.ru" spam-flag="no" /> </links> </check-spam-result>""" def test_is_not_spam(self): HTTPretty.register_uri(HTTPretty.POST, "http://cleanweb-api.yandex.ru/1.0/check-spam", body=self.ham_response) assert Cleanweb(key='yyy').check_spam(body='Питон') == { 'id': '123456789abcd', 'spam_flag': False, 'links': [] } assert HTTPretty.last_request.method == "POST" self.assertBodyQueryString(**{'body-plain': ['Питон']}) assert HTTPretty.last_request.querystring == {'key': ['yyy']} def test_is_spam(self): HTTPretty.register_uri(HTTPretty.POST, "http://cleanweb-api.yandex.ru/1.0/check-spam", body=self.spam_response) spam_text = 'ШОК! Видео скачать без СМС! 
http://cnn.com http://yandex.ru' spam_or_ham = Cleanweb(key='yyy').check_spam(subject=spam_text, body='123', ip='10.178.33.2', name='Vasia', body_type='html') assert spam_or_ham == { 'id': '123456789efgh', 'spam_flag': True, 'links': [('http://cnn.com', True), ('http://yandex.ru', False)] } assert HTTPretty.last_request.method == "POST" self.assertBodyQueryString(**{'ip': ['10.178.33.2'], 'body-html': ['123'], 'subject-plain': [spam_text], 'name': ['Vasia']}) assert HTTPretty.last_request.querystring == { 'key': ['yyy'], } class GetCaptcha(HttprettyCase): def setUp(self): super(GetCaptcha, self).setUp() self.valid_response = """ <!DOCTYPE get-captcha-result PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <get-captcha-result> <captcha>abcd12345</captcha> <url>http://i.captcha.yandex.net/image?key=abcd12345</url> </get-captcha-result> """ def test_can_be_obtained_without_msg_id(self): HTTPretty.register_uri(HTTPretty.GET, "http://cleanweb-api.yandex.ru/1.0/get-captcha", body=self.valid_response) assert Cleanweb(key='xxx').get_captcha() == {'captcha': 'abcd12345', 'url': 'http://i.captcha.yandex.net/image?key=abcd12345'} assert HTTPretty.last_request.querystring == { "key": ["xxx"], } def test_can_be_obtained_with_msg_id(self): HTTPretty.register_uri(HTTPretty.GET, "http://cleanweb-api.yandex.ru/1.0/get-captcha", body=self.valid_response) Cleanweb(key='xxx').get_captcha(id='somekindofmsgid') == {'captcha': 'abcd12345', 'url': 'http://i.captcha.yandex.net/image?key=abcd12345'} assert HTTPretty.last_request.querystring == { "key": ["xxx"], "id": ["somekindofmsgid"] } class CheckCaptcha(HttprettyCase): def setUp(self): super(CheckCaptcha, self).setUp() self.valid_response = """ <!DOCTYPE check-captcha-result PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <check-captcha-result> <ok /> </check-captcha-result> """ self.invalid_response = """ <!DOCTYPE check-captcha-result PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <check-captcha-result xmlns:x="http://www.yandex.ru/xscript"> <failed></failed> </check-captcha-result> """ def test_valid_captcha_no_msg_is_ok(self): HTTPretty.register_uri(HTTPretty.GET, "http://cleanweb-api.yandex.ru/1.0/check-captcha", body=self.valid_response) assert Cleanweb(key='xxx').check_captcha(captcha='abcd12345', value='48151632') assert HTTPretty.last_request.querystring == { "key": ["xxx"], "captcha": ["abcd12345"], "value": ["48151632"], } def test_valid_captcha_msg_is_ok(self): HTTPretty.register_uri(HTTPretty.GET, "http://cleanweb-api.yandex.ru/1.0/check-captcha", body=self.valid_response) assert Cleanweb(key='xxx').check_captcha(id='somekindofmsgid', captcha='abcd12345', value='48151632') assert HTTPretty.last_request.querystring == { "key": ["xxx"], "captcha": ["abcd12345"], "value": ["48151632"], "id": ["somekindofmsgid"] } def test_invalid_captcha(self): HTTPretty.register_uri(HTTPretty.GET, "http://cleanweb-api.yandex.ru/1.0/check-captcha", body=self.invalid_response) assert Cleanweb(key='xxx').check_captcha(id='somekindofmsgid', captcha='abcd12345', value='000') == False assert HTTPretty.last_request.querystring == { "key": ["xxx"], "captcha": ["abcd12345"], "value": ["000"], "id": ["somekindofmsgid"] } class Complain(HttprettyCase): def setUp(self): super(Complain, self).setUp() self.valid_response = """ <complain-result> <ok/> </complain-result>""" def test_spam_is_ham(self): 
HTTPretty.register_uri(HTTPretty.POST, "http://cleanweb-api.yandex.ru/1.0/complain", body=self.valid_response) Cleanweb(key='zzz').complain(id='somekindofmsgid', is_spam=True) self.assertBodyQueryString(spamtype=['spam'], id=['somekindofmsgid']) def test_spam_is_spam(self): HTTPretty.register_uri(HTTPretty.POST, "http://cleanweb-api.yandex.ru/1.0/complain", body=self.valid_response) Cleanweb(key='zzz').complain(id='somekindofmsgid', is_spam=False) self.assertBodyQueryString(spamtype=['ham'], id=['somekindofmsgid'])
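
# --- Illustrative client usage sketch (added for clarity; not part of the original tests). ---
# The behaviour exercised above boils down to the following calls (the key is a placeholder):
#
#   client = Cleanweb(key='your-api-key')
#   verdict = client.check_spam(subject='...', body='...', ip='10.0.0.1', name='Vasia')
#   # verdict -> {'id': ..., 'spam_flag': True/False, 'links': [(url, is_spam), ...]}
#   captcha = client.get_captcha(id=verdict['id'])   # {'captcha': ..., 'url': ...}
#   client.check_captcha(id=verdict['id'], captcha=captcha['captcha'], value='user input')
#   client.complain(id=verdict['id'], is_spam=True)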
mit
4,414,990,219,486,617,000
40.941748
145
0.569907
false
romasch/estudioctl
elogger.py
1
2910
# ---------------------------------------------------------------------------- # Helper functions for logging and printing. # #For a systemwide unique logger, import the module like this: #from logger import SystemLogger # # ---------------------------------------------------------------------------- import os import time import traceback import config # Try to enable nicely colored console output. _has_colorama = False try: from colorama import init, Fore, Back, Style init() _has_colorama = True except ImportError: pass # The logger class. For exception safety this should be used inside a 'with' statement. class Logger: # Initialization and teardown def __init__ (self, a_file, a_level): self._file_name = a_file self._verbosity = a_level self._log_file = None self._start = 0.0 def __enter__ (self): open (self._file_name, 'w').close() # Clear log file. self._log_file = open (self._file_name, 'w') # Open log file. self._start = time.time() # Remember start time. return self def __exit__ (self, etype, evalue, etrace): if evalue != None: self._to_logfile ("An exception happened.\n\n" + traceback.format_exc()) self.info("All done (duration " + self._duration() + ")", force=True) self._log_file.close() # Private implementation features. def _to_logfile (self, text): self._log_file.write(self._duration()) self._log_file.write(" --- ") self._log_file.write(text) self._log_file.write("\n") self._log_file.flush() def _duration (self): return time.strftime('%H:%M:%S', time.gmtime(time.time() - self._start)) # Public methods def debug (self, text): if self._verbosity > 2: print (text) self._to_logfile(text) def info (self, text, pre='', force=False): if self._verbosity > 1 or force: print (pre + text) self._to_logfile(pre + text) def warning (self, text, pre=''): if self._verbosity > 0: if _has_colorama: print (pre + Back.YELLOW + Fore.YELLOW + Style.BRIGHT + text + Style.RESET_ALL) else: print (pre + text) self._to_logfile(pre + text) def error (self, text, pre=''): if self._verbosity > 0: if _has_colorama: print (pre + Back.RED + Fore.RED + Style.BRIGHT + text + Style.RESET_ALL) else: print (pre + text) self._to_logfile(pre + text) def success (self, text, pre=''): if self._verbosity > 0: if _has_colorama: print (pre + Back.GREEN + Fore.GREEN + Style.BRIGHT + text + Style.RESET_ALL) else: print (pre + text) self._to_logfile(pre + text) def get_file(self): return self._log_file # A unique system-wide logger. SystemLogger = Logger(os.path.join (config.base_directory(), config.v_dir_build, config.v_log_filename), config.v_verbose_level) SystemLogger.__enter__() import atexit import sys def _stop (logger): etype, evalue, etrace = sys.exc_info() logger.__exit__ (etype, evalue, etrace) atexit.register (_stop, SystemLogger)
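
# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Because the module instantiates and enters SystemLogger at import time, callers can
# simply import it and log at the level they need:
#
#   from elogger import SystemLogger
#   SystemLogger.debug('only shown at verbosity > 2')
#   SystemLogger.info('normal progress message')
#   SystemLogger.warning('something looks off')
#   SystemLogger.error('something failed')
#   SystemLogger.success('step finished')
#
# The atexit hook registered above closes the log file and records the total duration
# when the process exits.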
bsd-3-clause
7,431,526,109,932,331,000
25.697248
128
0.627148
false
simsong/grr-insider
lib/rdfvalues/client_test.py
1
4020
#!/usr/bin/env python """Test client RDFValues.""" from grr.lib import rdfvalue from grr.lib.rdfvalues import test_base from grr.proto import jobs_pb2 class UserTests(test_base.RDFValueTestCase): """Test the User ProtoStruct implementation.""" rdfvalue_class = rdfvalue.User USER_ACCOUNT = dict( username=u"user", full_name=u"John Smith", comment=u"This is a user", last_logon=10000, domain=u"Some domain name", homedir=u"/home/user", sid=u"some sid") def GenerateSample(self, number=0): result = rdfvalue.User(username="user%s" % number) result.special_folders.desktop = "User Desktop %s" % number return result def testCompatibility(self): proto = jobs_pb2.User(username="user1") proto.special_folders.desktop = "User Desktop 1" serialized = proto.SerializeToString() fast_proto = rdfvalue.User(serialized) self.assertEqual(fast_proto.username, proto.username) self.assertEqual(fast_proto.special_folders.desktop, proto.special_folders.desktop) # Serialized form of both should be the same. self.assertProtoEqual(fast_proto, proto) def testTimeEncoding(self): fast_proto = rdfvalue.User(username="user") # Check that we can coerce an int to an RDFDatetime. fast_proto.last_logon = 1365177603180131 self.assertEqual(str(fast_proto.last_logon), "2013-04-05 16:00:03") self.assertEqual(type(fast_proto.last_logon), rdfvalue.RDFDatetime) # Check that this is backwards compatible with the old protobuf library. proto = jobs_pb2.User() proto.ParseFromString(fast_proto.SerializeToString()) # Old implementation should just see the last_logon field as an integer. self.assertEqual(proto.last_logon, 1365177603180131) self.assertEqual(type(proto.last_logon), long) # fast protobufs interoperate with old serialized formats. serialized_data = proto.SerializeToString() fast_proto = rdfvalue.User(serialized_data) self.assertEqual(fast_proto.last_logon, 1365177603180131) self.assertEqual(type(fast_proto.last_logon), rdfvalue.RDFDatetime) def testPrettyPrintMode(self): for mode, result in [ (0775, "-rwxrwxr-x"), (075, "----rwxr-x"), (0, "----------"), # DIR (040775, "drwxrwxr-x"), # SUID (35232, "-rwSr-----"), # GID (34208, "-rw-r-S---"), # CHR (9136, "crw-rw---T"), # BLK (25008, "brw-rw----"), # Socket (49663, "srwxrwxrwx"), # Sticky (33791, "-rwxrwxrwt"), # Sticky, not x (33784, "-rwxrwx--T"), ]: value = rdfvalue.StatMode(mode) self.assertEqual(unicode(value), result) def testConvertToKnowledgeBaseUser(self): folders = rdfvalue.FolderInformation(desktop="/usr/local/test/Desktop") user = rdfvalue.User(username="test", domain="test.com", homedir="/usr/local/test", special_folders=folders) kbuser = user.ToKnowledgeBaseUser() self.assertEqual(kbuser.username, "test") self.assertEqual(kbuser.userdomain, "test.com") self.assertEqual(kbuser.homedir, "/usr/local/test") self.assertEqual(kbuser.desktop, "/usr/local/test/Desktop") def testConvertFromKnowledgeBaseUser(self): kbuser = rdfvalue.KnowledgeBaseUser(username="test", userdomain="test.com", homedir="/usr/local/test", desktop="/usr/local/test/Desktop", localappdata="/usr/local/test/AppData") user = rdfvalue.User().FromKnowledgeBaseUser(kbuser) self.assertEqual(user.username, "test") self.assertEqual(user.domain, "test.com") self.assertEqual(user.homedir, "/usr/local/test") self.assertEqual(user.special_folders.desktop, "/usr/local/test/Desktop") self.assertEqual(user.special_folders.local_app_data, "/usr/local/test/AppData")
apache-2.0
-8,004,924,776,507,574,000
33.956522
79
0.642289
false
MAPC/warren-st-development-database
developmentdatabase/settings/base.py
1
7562
# BASE SETTINGS COMMON TO ALL ENVIRONMENTS import os, sys, dj_database_url from django.core.exceptions import ImproperlyConfigured def get_env_variable(var_name): """ Get the environment variable or return exception """ try: return os.environ[var_name] except KeyError: error_msg = "Set the %s environment variable" % var_name raise ImproperlyConfigured(error_msg) abspath = lambda *p: os.path.abspath(os.path.join(*p)) PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) ADMINS = ( ('Matt Cloyd', '[email protected]'), ) MANAGERS = ADMINS # The current environment. Set the environment variable ENV_TYPE in each environment # to one of: [local, staging, production]. # TODO: DJANGO_SETTINGS_MODULE = eval(developmentdatabase.settings.#{get_env_variable("ENV_TYPE")}) DJANGO_SETTINGS_MODULE = get_env_variable("ENV_TYPE_FULL") SECRET_KEY = get_env_variable("SECRET_KEY") BING_API_KEY = get_env_variable("BING_API_KEY") WSAPIKEY = get_env_variable("WSAPIKEY") DATABASE_URL = get_env_variable("DATABASE_URL") DATABASES = { 'default': dj_database_url.config(default=DATABASE_URL) } # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT = abspath(PROJECT_ROOT, 'media') # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '/media/' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = 'staticfiles' # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. os.path.join(PROJECT_ROOT, 'static/'), ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # List of callables that know how to import templates from various sources. 
TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) TEMPLATE_CONTEXT_PROCESSORS = ( 'django.contrib.auth.context_processors.auth', 'django.core.context_processors.debug', 'django.core.context_processors.i18n', 'django.core.context_processors.media', 'django.core.context_processors.static', 'django.contrib.messages.context_processors.messages', # flash 'developmentdatabase.context_processors.template_settings', 'tim.context_processors.auth_variables',) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', # required to be listed before flash 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.middleware.transaction.TransactionMiddleware', 'reversion.middleware.RevisionMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', # flash # Uncomment the next line for simple clickjacking protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'developmentdatabase.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'developmentdatabase.wsgi.application' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. os.path.join(PROJECT_ROOT, 'templates/'), ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.humanize', 'grappelli', # Uncomment the next line to enable the admin: 'django.contrib.admin', # Uncomment the next line to enable admin documentation: 'django.contrib.admindocs', # 3rd party 'userena', 'guardian', 'easy_thumbnails', 'reversion', 'profiles', 'bootstrap', 'tastypie', # project 'development', 'tim', ) # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } # flash MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage' # tastypie API_LIMIT_PER_PAGE = 20 # Userena AUTHENTICATION_BACKENDS = ( 'userena.backends.UserenaAuthenticationBackend', 'guardian.backends.ObjectPermissionBackend', 'django.contrib.auth.backends.ModelBackend', ) # TODO: Why are these set here? Why is this not in URLConfs? 
AUTH_PROFILE_MODULE = 'profiles.Profile' LOGIN_REDIRECT_URL = '/projects/search/' LOGIN_URL = '/accounts/signin/' LOGOUT_URL = '/accounts/signout/' USERENA_MUGSHOT_GRAVATAR = False USERENA_MUGSHOT_DEFAULT = STATIC_URL + 'img/mugshot.png' USERENA_DISABLE_PROFILE_LIST = True USERENA_HIDE_EMAIL = True # Guardian ANONYMOUS_USER_ID = -1 # WalkScore API WS_MORE_INFO_ICON = 'http://www2.walkscore.com/images/api-more-info.gif' WS_MORE_INFO_LINK = 'http://www.walkscore.com/how-it-works.shtml' WS_LOGO_URL = 'http://www2.walkscore.com/images/api-logo.gif'
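
# --- Illustrative note added for clarity; not part of the original settings module. ---
# The ENV_TYPE/ENV_TYPE_FULL pattern above implies one thin module per environment that
# starts from these base settings, e.g. a hypothetical developmentdatabase/settings/local.py:
#
#   from .base import *
#
#   DEBUG = True
#   TEMPLATE_DEBUG = DEBUG
#   ALLOWED_HOSTS = ['localhost', '127.0.0.1']
#
# with DJANGO_SETTINGS_MODULE (or ENV_TYPE_FULL) pointing at that module in each environment.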
bsd-3-clause
3,250,895,186,263,924,700
32.910314
99
0.714229
false
jmankiewicz/odooAddons
hr_attendance_new_check/models/hr_attendance.py
1
2297
# -*- coding: utf-8 -*-
from datetime import datetime

from openerp import models, fields, api
from openerp.exceptions import ValidationError


class Attendance(models.Model):
    _inherit = "hr.attendance"

    validate = fields.Boolean(string='Validate', help='Check to let Odoo validate if this attendance-event is plausible.', default=True)
    valid = fields.Boolean(string='Valid', help='Indicates, that this item has passed validation successfully.', compute='_compute_valid', default=False, readonly=True)
    worked_hours = fields.Float(string='Worked Hours', store=True, compute='_worked_hours_compute')
    action = fields.Selection([('sign_in', 'Sign In'), ('sign_out', 'Sign Out'), ('action', 'Action')], 'Action', required=True)

    @api.multi
    @api.onchange('valid', 'name')
    def _worked_hours_compute(self):
        for obj in self:
            if obj.action == 'sign_in':
                obj.worked_hours = 0
            elif obj.action == 'sign_out' and obj.valid:
                # Get the associated sign-in
                last_signin = self.search([
                    ('employee_id', '=', obj.employee_id.id),
                    ('valid', '=', True),
                    ('name', '<', obj.name),
                    ('action', '=', 'sign_in'),
                ], limit=1, order='name DESC')
                if last_signin:
                    # Compute time elapsed between sign-in and sign-out
                    last_signin_datetime = datetime.strptime(last_signin.name, '%Y-%m-%d %H:%M:%S')
                    signout_datetime = datetime.strptime(obj.name, '%Y-%m-%d %H:%M:%S')
                    workedhours_datetime = signout_datetime - last_signin_datetime
                    obj.worked_hours = (workedhours_datetime.seconds / 60) / 60.0
                else:
                    obj.worked_hours = False

    @api.multi
    @api.onchange('validate', 'name', 'action')
    def _compute_valid(self):
        for obj in self:
            obj.valid = super(Attendance, obj)._altern_si_so()

    @api.multi
    @api.constrains('name', 'action', 'validate')
    def _constrain_valid(self):
        for obj in self:
            if obj.validate and not super(Attendance, obj)._altern_si_so():
                raise ValidationError('Error ! Sign in (resp. Sign out) must follow Sign out (resp. Sign in)')

    @api.one
    def _altern_si_so(self):
        # Dummy to override old constraint
        return True

    _constraints = [(_altern_si_so, 'Error ! Sign in (resp. Sign out) must follow Sign out (resp. Sign in)', ['action'])]  # Override old constraint
agpl-3.0
-6,385,415,243,148,886,000
43.173077
164
0.670004
false
wazo-pbx/wazo-plugind
wazo_plugind/root_worker.py
1
4066
# Copyright 2017 The Wazo Authors (see the AUTHORS file) # SPDX-License-Identifier: GPL-3.0-or-later import logging import signal import os import sys from multiprocessing import Event, Process, Queue from queue import Empty from threading import Lock from .helpers import exec_and_log logger = logging.getLogger(__name__) class BaseWorker: name = 'base' def __init__(self): self._command_queue = Queue() self._result_queue = Queue() self._stop_requested = Event() self._command_queue_lock = Lock() self._process = Process(target=_run, args=(self._command_queue, self._result_queue, self._stop_requested)) def __enter__(self): self.run() return self def __exit__(self, *args): self.stop() def run(self): logger.info('starting %s worker', self.name) self._process.start() def stop(self): logger.info('stopping %s worker', self.name) # unblock the command_queue in the worker self._stop_requested.set() # close both queues self._command_queue.close() self._command_queue.join_thread() self._result_queue.close() self._result_queue.join_thread() # wait for the worker process to stop if self._process.is_alive(): self._process.join() logger.info('%s worker stopped', self.name) def send_cmd_and_wait(self, cmd, *args, **kwargs): if not self._process.is_alive(): logger.info('%s process is dead quitting', self.name) # kill the main thread os.kill(os.getpid(), signal.SIGTERM) # shutdown the current thread execution so that executor.shutdown does not block sys.exit(1) with self._command_queue_lock: self._command_queue.put((cmd, args, kwargs)) return self._result_queue.get() class RootWorker(BaseWorker): name = 'root' def apt_get_update(self, *args, **kwargs): return self.send_cmd_and_wait('update', *args, **kwargs) def install(self, *args, **kwargs): return self.send_cmd_and_wait('install', *args, **kwargs) def uninstall(self, *args, **kwargs): return self.send_cmd_and_wait('uninstall', *args, **kwargs) class _CommandExecutor: def execute(self, cmd, *args, **kwargs): fn = getattr(self, cmd, None) if not fn: logger.info('root worker received an unknown command "%s"', cmd.name) return try: return fn(*args, **kwargs) except Exception: logger.exception('Exception caugth in root worker process') def update(self, uuid_): logger.debug('[%s] updating apt cache', uuid_) cmd = ['apt-get', 'update', '-q'] p = exec_and_log(logger.debug, logger.error, cmd) return p.returncode == 0 def install(self, uuid_, deb): logger.debug('[%s] installing %s...', uuid_, deb) cmd = ['gdebi', '-nq', deb] p = exec_and_log(logger.debug, logger.error, cmd) return p.returncode == 0 def uninstall(self, uuid, package_name): logger.debug('[%s] uninstalling %s', uuid, package_name) cmd = ['apt-get', 'remove', '-y', package_name] p = exec_and_log(logger.debug, logger.error, cmd) return p.returncode == 0 def _ignore_sigterm(signum, frame): logger.info('root worker is ignoring a SIGTERM') def _run(command_queue, result_queue, stop_requested): logger.info('root worker started') os.setsid() signal.signal(signal.SIGTERM, _ignore_sigterm) executor = _CommandExecutor() while not stop_requested.is_set(): try: cmd, args, kwargs = command_queue.get(timeout=0.1) except (KeyboardInterrupt, Empty, Exception): continue result = executor.execute(cmd, *args, **kwargs) result_queue.put(result) logger.info('root worker done')
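
# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# RootWorker is a context manager around the privileged child process, so a caller in
# the plugin service could drive it roughly like this (uuid and path values are made up):
#
#   with RootWorker() as worker:
#       if worker.apt_get_update('some-task-uuid'):
#           worker.install('some-task-uuid', '/tmp/wazo-plugind-fake.deb')
#           worker.uninstall('some-task-uuid', 'wazo-plugind-fake')
#
# Each call blocks until the child process reports the command's success flag back
# through the result queue.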
gpl-3.0
4,388,545,950,319,832,000
29.118519
92
0.589769
false
Eric89GXL/vispy
examples/basics/scene/modular_shaders/editor.py
1
7039
# -*- coding: utf-8 -*- # vispy: testskip # ----------------------------------------------------------------------------- # Copyright (c) Vispy Development Team. All Rights Reserved. # Distributed under the (new) BSD License. See LICENSE.txt for more info. # ----------------------------------------------------------------------------- """ # QScintilla editor # # Adapted from Eli Bendersky ([email protected]) # This code is in the public domain # # API: http://pyqt.sourceforge.net/Docs/QScintilla2/classQsciScintilla.html # """ import sys import re from PyQt5.QtCore import * # noqa from PyQt5.QtWidgets import * # noqa try: from PyQt5 import Qsci from PyQt5.Qsci import QsciScintilla HAVE_QSCI = True except ImportError: HAVE_QSCI = False if not HAVE_QSCI: # backup editor in case QScintilla is not available class Editor(QPlainTextEdit): def __init__(self, parent=None, language=None): QPlainTextEdit.__init__(self, parent) def setText(self, text): self.setPlainText(text) def text(self): return str(self.toPlainText()).encode('UTF-8') def __getattr__(self, name): return lambda: None else: class Editor(QsciScintilla): ARROW_MARKER_NUM = 8 def __init__(self, parent=None, language='Python'): super(Editor, self).__init__(parent) self.setIndentationsUseTabs(False) self.setIndentationWidth(4) # Set the default font font = QFont() font.setFamily('DejaVu Sans Mono') font.setFixedPitch(True) font.setPointSize(10) self.setFont(font) self.setMarginsFont(font) self.zoomIn() # Margin 0 is used for line numbers fontmetrics = QFontMetrics(font) self.setMarginsFont(font) self.setMarginWidth(0, fontmetrics.width("000") + 6) self.setMarginLineNumbers(0, True) self.setMarginsBackgroundColor(QColor("#cccccc")) self._marker = None # Clickable margin 1 for showing markers #self.setMarginSensitivity(1, True) #self.connect(self, # SIGNAL('marginClicked(int, int, Qt::KeyboardModifiers)'), # self.on_margin_clicked) self.markerDefine(QsciScintilla.RightArrow, self.ARROW_MARKER_NUM) self.setMarkerBackgroundColor(QColor("#ee1111"), self.ARROW_MARKER_NUM) # Brace matching: enable for a brace immediately before or after # the current position # self.setBraceMatching(QsciScintilla.SloppyBraceMatch) # Current line visible with special background color self.setCaretLineVisible(True) self.setCaretLineBackgroundColor(QColor("#ffe4e4")) # Set Python lexer # Set style for Python comments (style number 1) to a fixed-width # courier. 
# lexer = getattr(Qsci, 'QsciLexer' + language)() lexer.setDefaultFont(font) self.setLexer(lexer) self.SendScintilla(QsciScintilla.SCI_STYLESETFONT, 1, 'Courier') # Don't want to see the horizontal scrollbar at all # Use raw message to Scintilla here (all messages are documented # here: http://www.scintilla.org/ScintillaDoc.html) self.SendScintilla(QsciScintilla.SCI_SETHSCROLLBAR, 0) self.setWrapMode(QsciScintilla.WrapWord) self.setEolMode(QsciScintilla.EolUnix) # not too small #self.setMinimumSize(600, 450) def set_marker(self, line): self.clear_marker() self.markerAdd(line, self.ARROW_MARKER_NUM) self._marker = line def clear_marker(self): if self._marker is not None: self.markerDelete(self._marker, self.ARROW_MARKER_NUM) #def on_margin_clicked(self, nmargin, nline, modifiers): ## Toggle marker for the line the margin was clicked on #if self.markersAtLine(nline) != 0: #self.markerDelete(nline, self.ARROW_MARKER_NUM) #else: #self.markerAdd(nline, self.ARROW_MARKER_NUM) def wheelEvent(self, ev): # Use ctrl+wheel to zoom in/out if Qt.ControlModifier & ev.modifiers(): if ev.delta() > 0: self.zoomIn() else: self.zoomOut() else: return super(Editor, self).wheelEvent(ev) def keyPressEvent(self, ev): if int(Qt.ControlModifier & ev.modifiers()) > 0: if ev.key() == Qt.Key_Slash: self.comment(True) return elif ev.key() == Qt.Key_Question: self.comment(False) return elif (ev.key() == Qt.Key_Z and Qt.ShiftModifier & ev.modifiers()): self.redo() return elif ev.key() == Qt.Key_Q: sys.exit(0) return super(Editor, self).keyPressEvent(ev) def text(self): return str(super(Editor, self).text()).encode('UTF-8') def comment(self, comment=True): sel = self.getSelection()[:] text = self.text() lines = text.split('\n') if sel[0] == -1: # toggle for just this line row, col = self.getCursorPosition() line = lines[row] self.setSelection(row, 0, row, len(line)) if comment: line = '#' + line else: line = line.replace("#", "", 1) self.replaceSelectedText(line) self.setCursorPosition(row, col+(1 if col > 0 else 0)) else: block = lines[sel[0]:sel[2]] # make sure all lines have # new = [] if comment: for line in block: new.append('#' + line) else: for line in block: if line.strip() == '': new.append(line) continue if re.match(r'\s*\#', line) is None: return new.append(line.replace('#', '', 1)) self.setSelection(sel[0], 0, sel[2], 0) self.replaceSelectedText('\n'.join(new) + '\n') #shift = 1 if comment else -1 self.setSelection(sel[0], max(0, sel[1]), sel[2], sel[3]) if __name__ == "__main__": app = QApplication(sys.argv) editor = Editor() editor.show() editor.setText(open(sys.argv[0]).read()) editor.resize(800, 800) app.exec_()
bsd-3-clause
6,342,080,126,043,699,000
34.913265
79
0.516124
false
Canpio/Paddle
python/paddle/fluid/tests/unittests/testsuite.py
1
6629
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import paddle.fluid.core as core from paddle.fluid.op import Operator def as_lodtensor(np_array, lod, place): tensor = core.LoDTensor() tensor.set(np_value, place) if lod is not None: tensor.set_lod(lod) return tensor def create_op(scope, op_type, inputs, outputs, attrs): kwargs = dict() op_maker = core.op_proto_and_checker_maker op_role_attr_name = op_maker.kOpRoleAttrName() if op_role_attr_name not in attrs: attrs[op_role_attr_name] = int(op_maker.OpRole.Forward) def __create_var__(name, var_name): scope.var(var_name).get_tensor() kwargs[name].append(var_name) for in_name, in_dup in Operator.get_op_inputs(op_type): if in_name in inputs: kwargs[in_name] = [] if in_dup: sub_in = inputs[in_name] for item in sub_in: sub_in_name, _ = item[0], item[1] __create_var__(in_name, sub_in_name) else: __create_var__(in_name, in_name) for out_name, out_dup in Operator.get_op_outputs(op_type): if out_name in outputs: kwargs[out_name] = [] if out_dup: sub_out = outputs[out_name] for item in sub_out: sub_out_name, _ = item[0], item[1] __create_var__(out_name, sub_out_name) else: __create_var__(out_name, out_name) for attr_name in Operator.get_op_attr_names(op_type): if attr_name in attrs: kwargs[attr_name] = attrs[attr_name] return Operator(op_type, **kwargs) def set_input(scope, op, inputs, place): def __set_input__(var_name, var): if isinstance(var, tuple) or isinstance(var, np.ndarray): tensor = scope.find_var(var_name).get_tensor() if isinstance(var, tuple): tensor.set_lod(var[1]) var = var[0] tensor.set_dims(var.shape) tensor.set(var, place) elif isinstance(var, float): scope.find_var(var_name).set_float(var) elif isinstance(var, int): scope.find_var(var_name).set_int(var) for in_name, in_dup in Operator.get_op_inputs(op.type()): if in_name in inputs: if in_dup: sub_in = inputs[in_name] for item in sub_in: sub_in_name, sub_in_val = item[0], item[1] __set_input__(sub_in_name, sub_in_val) else: __set_input__(in_name, inputs[in_name]) def append_input_output(block, op_proto, np_list, is_input, dtype): '''Insert VarDesc and generate Python variable instance''' proto_list = op_proto.inputs if is_input else op_proto.outputs def create_var(block, name, np_list, var_proto): dtype = None shape = None lod_level = None if name not in np_list: assert var_proto.intermediate, "{} not found".format(name) else: np_value = np_list[name] if isinstance(np_value, tuple): dtype = np_value[0].dtype # output shape, lod should be infered from input. 
if is_input: shape = list(np_value[0].shape) lod_level = len(np_value[1]) else: dtype = np_value.dtype if is_input: shape = list(np_value.shape) lod_level = 0 return block.create_var( dtype=dtype, shape=shape, lod_level=lod_level, name=name) var_dict = {} for var_proto in proto_list: var_name = str(var_proto.name) if is_input: if (var_name not in np_list) and var_proto.dispensable: continue assert (var_name in np_list) or (var_proto.dispensable), \ "Missing {} as input".format(var_name) if var_proto.duplicable: assert isinstance(np_list[var_name], list), \ "Duplicable {} should be set as list".format(var_name) var_list = [] for (name, np_value) in np_list[var_name]: var_list.append( create_var(block, name, {name: np_value}, var_proto)) var_dict[var_name] = var_list else: var_dict[var_name] = create_var(block, var_name, np_list, var_proto) return var_dict def append_loss_ops(block, output_names): mean_inputs = map(block.var, output_names) # for item in mean_inputs: # print(item) # print("Item", item.dtype) if len(mean_inputs) == 1: loss = block.create_var(dtype=mean_inputs[0].dtype, shape=[1]) op = block.append_op( inputs={"X": mean_inputs}, outputs={"Out": loss}, type='mean') op.desc.infer_var_type(block.desc) op.desc.infer_shape(block.desc) else: avg_sum = [] for cur_loss in mean_inputs: cur_avg_loss = block.create_var(dtype=cur_loss.dtype, shape=[1]) op = block.append_op( inputs={"X": [cur_loss]}, outputs={"Out": [cur_avg_loss]}, type="mean") op.desc.infer_var_type(block.desc) op.desc.infer_shape(block.desc) avg_sum.append(cur_avg_loss) loss_sum = block.create_var(dtype=avg_sum[0].dtype, shape=[1]) op_sum = block.append_op( inputs={"X": avg_sum}, outputs={"Out": loss_sum}, type='sum') op_sum.desc.infer_var_type(block.desc) op_sum.desc.infer_shape(block.desc) loss = block.create_var(dtype=loss_sum.dtype, shape=[1]) op_loss = block.append_op( inputs={"X": loss_sum}, outputs={"Out": loss}, type='scale', attrs={'scale': 1.0 / float(len(avg_sum))}) op_loss.desc.infer_var_type(block.desc) op_loss.desc.infer_shape(block.desc) return loss
apache-2.0
-5,607,755,067,677,922,000
35.423077
80
0.557701
false
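Editor's note on the as_lodtensor helper in the Paddle record above: it calls tensor.set(np_value, place), but np_value is not defined in that function's scope, so the helper would raise a NameError if invoked. A minimal corrected sketch, assuming the intent was to copy the np_array argument into the LoDTensor (that substitution is an assumption, not part of the original file):

import paddle.fluid.core as core

def as_lodtensor(np_array, lod, place):
    # Copy the numpy array into a LoDTensor and attach the LoD info when given.
    tensor = core.LoDTensor()
    tensor.set(np_array, place)  # the record above passes np_value, which is undefined there
    if lod is not None:
        tensor.set_lod(lod)
    return tensor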
zebMcCorkle/ZeroNet
src/Test/conftest.py
1
5367
import os import sys import urllib import time import logging import json import pytest import mock def pytest_addoption(parser): parser.addoption("--slow", action='store_true', default=False, help="Also run slow tests") # Config if sys.platform == "win32": PHANTOMJS_PATH = "tools/phantomjs/bin/phantomjs.exe" else: PHANTOMJS_PATH = "phantomjs" SITE_URL = "http://127.0.0.1:43110" sys.path.insert(0, os.path.abspath(os.path.dirname(__file__) + "/../lib")) # External modules directory sys.path.insert(0, os.path.abspath(os.path.dirname(__file__) + "/..")) # Imports relative to src dir from Config import config config.argv = ["none"] # Dont pass any argv to config parser config.parse() config.data_dir = "src/Test/testdata" # Use test data for unittests config.debug_socket = True # Use test data for unittests config.tor = "disabled" # Don't start Tor client logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) from Plugin import PluginManager PluginManager.plugin_manager.loadPlugins() import gevent from gevent import monkey monkey.patch_all(thread=False) from Site import Site from User import UserManager from File import FileServer from Connection import ConnectionServer from Crypt import CryptConnection from Ui import UiWebsocket from Tor import TorManager @pytest.fixture(scope="session") def resetSettings(request): os.chdir(os.path.abspath(os.path.dirname(__file__) + "/../..")) # Set working dir open("%s/sites.json" % config.data_dir, "w").write("{}") open("%s/users.json" % config.data_dir, "w").write(""" { "15E5rhcAUD69WbiYsYARh4YHJ4sLm2JEyc": { "certs": {}, "master_seed": "024bceac1105483d66585d8a60eaf20aa8c3254b0f266e0d626ddb6114e2949a", "sites": {} } } """) def cleanup(): os.unlink("%s/sites.json" % config.data_dir) os.unlink("%s/users.json" % config.data_dir) request.addfinalizer(cleanup) @pytest.fixture(scope="session") def resetTempSettings(request): data_dir_temp = config.data_dir + "-temp" if not os.path.isdir(data_dir_temp): os.mkdir(data_dir_temp) open("%s/sites.json" % data_dir_temp, "w").write("{}") open("%s/users.json" % data_dir_temp, "w").write(""" { "15E5rhcAUD69WbiYsYARh4YHJ4sLm2JEyc": { "certs": {}, "master_seed": "024bceac1105483d66585d8a60eaf20aa8c3254b0f266e0d626ddb6114e2949a", "sites": {} } } """) def cleanup(): os.unlink("%s/sites.json" % data_dir_temp) os.unlink("%s/users.json" % data_dir_temp) request.addfinalizer(cleanup) @pytest.fixture() def site(): site = Site("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT") return site @pytest.fixture() def site_temp(request): with mock.patch("Config.config.data_dir", config.data_dir + "-temp"): site_temp = Site("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT") def cleanup(): site_temp.storage.deleteFiles() request.addfinalizer(cleanup) return site_temp @pytest.fixture(scope="session") def user(): user = UserManager.user_manager.get() user.sites = {} # Reset user data return user @pytest.fixture(scope="session") def browser(): try: from selenium import webdriver browser = webdriver.PhantomJS(executable_path=PHANTOMJS_PATH, service_log_path=os.path.devnull) browser.set_window_size(1400, 1000) except Exception, err: raise pytest.skip("Test requires selenium + phantomjs: %s" % err) return browser @pytest.fixture(scope="session") def site_url(): try: urllib.urlopen(SITE_URL).read() except Exception, err: raise pytest.skip("Test requires zeronet client running: %s" % err) return SITE_URL @pytest.fixture(scope="session") def file_server(request): request.addfinalizer(CryptConnection.manager.removeCerts) # Remove cert files after end file_server = 
FileServer("127.0.0.1", 1544) gevent.spawn(lambda: ConnectionServer.start(file_server)) # Wait for port opening for retry in range(10): time.sleep(0.1) # Port opening try: conn = file_server.getConnection("127.0.0.1", 1544) conn.close() break except Exception, err: print err assert file_server.running def stop(): file_server.stop() request.addfinalizer(stop) return file_server @pytest.fixture() def ui_websocket(site, file_server, user): class WsMock: def __init__(self): self.result = None def send(self, data): self.result = json.loads(data)["result"] ws_mock = WsMock() ui_websocket = UiWebsocket(ws_mock, site, file_server, user, None) def testAction(action, *args, **kwargs): func = getattr(ui_websocket, "action%s" % action) func(0, *args, **kwargs) return ui_websocket.ws.result ui_websocket.testAction = testAction return ui_websocket @pytest.fixture(scope="session") def tor_manager(): try: tor_manager = TorManager() assert tor_manager.connect() tor_manager.startOnions() except Exception, err: raise pytest.skip("Test requires Tor with ControlPort: %s, %s" % (config.tor_controller, err)) return tor_manager
gpl-2.0
4,731,466,866,487,467,000
27.854839
104
0.65027
false
google/nerfies
nerfies/utils.py
1
14210
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Non-differentiable utility functions.""" import collections from concurrent import futures import contextlib import functools import time from typing import List, Union import jax from jax import tree_util import jax.numpy as jnp import numpy as np from scipy import interpolate from scipy.spatial.transform import Rotation from scipy.spatial.transform import Slerp import tqdm # pylint: disable=unused-argument @functools.partial(jax.custom_jvp, nondiff_argnums=(1, 2, 3)) def safe_norm(x, axis=-1, keepdims=False, tol=1e-9): """Calculates a np.linalg.norm(d) that's safe for gradients at d=0. These gymnastics are to avoid a poorly defined gradient for np.linal.norm(0) see https://github.com/google/jax/issues/3058 for details Args: x: A np.array axis: The axis along which to compute the norm keepdims: if True don't squeeze the axis. tol: the absolute threshold within which to zero out the gradient. Returns: Equivalent to np.linalg.norm(d) """ return jnp.linalg.norm(x, axis=axis, keepdims=keepdims) @safe_norm.defjvp def _safe_norm_jvp(axis, keepdims, tol, primals, tangents): """Custom JVP rule for safe_norm.""" x, = primals x_dot, = tangents safe_tol = max(tol, 1e-30) y = safe_norm(x, tol=safe_tol, axis=axis, keepdims=True) y_safe = jnp.maximum(y, tol) # Prevent divide by zero. y_dot = jnp.where(y > safe_tol, x_dot * x / y_safe, jnp.zeros_like(x)) y_dot = jnp.sum(y_dot, axis=axis, keepdims=True) # Squeeze the axis if `keepdims` is True. if not keepdims: y = jnp.squeeze(y, axis=axis) y_dot = jnp.squeeze(y_dot, axis=axis) return y, y_dot def jacobian_to_curl(jacobian): """Computes the curl from the Jacobian.""" dfx_dy = jacobian[..., 0, 1] dfx_dz = jacobian[..., 0, 2] dfy_dx = jacobian[..., 1, 0] dfy_dz = jacobian[..., 1, 2] dfz_dx = jacobian[..., 2, 0] dfz_dy = jacobian[..., 2, 1] return jnp.stack([ dfz_dy - dfy_dz, dfx_dz - dfz_dx, dfy_dx - dfx_dy, ], axis=-1) def jacobian_to_div(jacobian): """Computes the divergence from the Jacobian.""" # If F : x -> x + f(x) then dF/dx = 1 + df/dx, so subtract 1 for each # diagonal of the Jacobian. return jnp.trace(jacobian, axis1=-2, axis2=-1) - 3.0 def compute_psnr(mse): """Compute psnr value given mse (we assume the maximum pixel value is 1). Args: mse: float, mean square error of pixels. Returns: psnr: float, the psnr value. """ return -10. * jnp.log(mse) / jnp.log(10.) @jax.jit def robust_whiten(x): median = jnp.nanmedian(x) mad = jnp.nanmean(jnp.abs(x - median)) return (x - median) / mad def interpolate_codes( codes: Union[np.ndarray, List[np.ndarray]], num_samples: int, method='spline'): """Interpolates latent codes. Args: codes: the codes to interpolate. num_samples: the number of samples to interpolate to. method: which method to use for interpolation. Returns: (np.ndarray): the interpolated codes. 
""" if isinstance(codes, list): codes = np.array(codes) t = np.arange(len(codes)) xs = np.linspace(0, len(codes) - 1, num_samples) if method == 'spline': cs = interpolate.CubicSpline(t, codes, bc_type='natural') return cs(xs).astype(np.float32) elif method == 'linear': interp = interpolate.interp1d(t, codes, axis=0) return interp(xs).astype(np.float32) raise ValueError(f'Unknown method {method!r}') def interpolate_cameras(cameras, num_samples: int): """Interpolates the cameras to the number of output samples. Uses a spherical linear interpolation (Slerp) to interpolate the camera orientations and a cubic spline to interpolate the camera positions. Args: cameras: the input cameras to interpolate. num_samples: the number of output cameras. Returns: (List[vision_sfm.Camera]): a list of interpolated cameras. """ rotations = [] positions = [] for camera in cameras: rotations.append(camera.orientation) positions.append(camera.position) in_times = np.linspace(0, 1, len(rotations)) slerp = Slerp(in_times, Rotation.from_dcm(rotations)) spline = interpolate.CubicSpline(in_times, positions) out_times = np.linspace(0, 1, num_samples) out_rots = slerp(out_times).as_dcm() out_positions = spline(out_times) ref_camera = cameras[0] out_cameras = [] for out_rot, out_pos in zip(out_rots, out_positions): out_camera = ref_camera.copy() out_camera.orientation = out_rot out_camera.position = out_pos out_cameras.append(out_camera) return out_cameras def logit(y): """The inverse of tf.nn.sigmoid().""" return -jnp.log(1. / y - 1.) def affine_sigmoid(real, lo=0, hi=1): """Maps reals to (lo, hi), where 0 maps to (lo+hi)/2.""" if not lo < hi: raise ValueError('`lo` (%g) must be < `hi` (%g)' % (lo, hi)) alpha = jax.nn.sigmoid(real) * (hi - lo) + lo return alpha def inv_affine_sigmoid(alpha, lo=0, hi=1): """The inverse of affine_sigmoid(., lo, hi).""" if not lo < hi: raise ValueError('`lo` (%g) must be < `hi` (%g)' % (lo, hi)) real = logit((alpha - lo) / (hi - lo)) return real def inv_softplus(y): """The inverse of tf.nn.softplus().""" return jnp.where(y > 87.5, y, jnp.log(jnp.expm1(y))) def affine_softplus(real, lo=0, ref=1): """Maps real numbers to (lo, infinity), where 0 maps to ref.""" if not lo < ref: raise ValueError('`lo` (%g) must be < `ref` (%g)' % (lo, ref)) shift = inv_softplus(1.0) scale = (ref - lo) * jax.nn.softplus(real + shift) + lo return scale def inv_affine_softplus(scale, lo=0, ref=1): """The inverse of affine_softplus(., lo, ref).""" if not lo < ref: raise ValueError('`lo` (%g) must be < `ref` (%g)' % (lo, ref)) shift = inv_softplus(1.0) real = inv_softplus((scale - lo) / (ref - lo)) - shift return real def learning_rate_decay(step, init_lr=5e-4, decay_steps=100000, decay_rate=0.1): """Continuous learning rate decay function. The computation for learning rate is lr = (init_lr * decay_rate**(step / decay_steps)) Args: step: int, the global optimization step. init_lr: float, the initial learning rate. decay_steps: int, the decay steps, please see the learning rate computation above. decay_rate: float, the decay rate, please see the learning rate computation above. Returns: lr: the learning for global step 'step'. 
""" power = step / decay_steps return init_lr * (decay_rate**power) def log1p_safe(x): """The same as tf.math.log1p(x), but clamps the input to prevent NaNs.""" return jnp.log1p(jnp.minimum(x, 3e37)) def exp_safe(x): """The same as tf.math.exp(x), but clamps the input to prevent NaNs.""" return jnp.exp(jnp.minimum(x, 87.5)) def expm1_safe(x): """The same as tf.math.expm1(x), but clamps the input to prevent NaNs.""" return jnp.expm1(jnp.minimum(x, 87.5)) def safe_sqrt(x, eps=1e-7): safe_x = jnp.where(x == 0, jnp.ones_like(x) * eps, x) return jnp.sqrt(safe_x) @jax.jit def general_loss_with_squared_residual(squared_x, alpha, scale): r"""The general loss that takes a squared residual. This fuses the sqrt operation done to compute many residuals while preserving the square in the loss formulation. This implements the rho(x, \alpha, c) function described in "A General and Adaptive Robust Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077. Args: squared_x: The residual for which the loss is being computed. x can have any shape, and alpha and scale will be broadcasted to match x's shape if necessary. alpha: The shape parameter of the loss (\alpha in the paper), where more negative values produce a loss with more robust behavior (outliers "cost" less), and more positive values produce a loss with less robust behavior (outliers are penalized more heavily). Alpha can be any value in [-infinity, infinity], but the gradient of the loss with respect to alpha is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth interpolation between several discrete robust losses: alpha=-Infinity: Welsch/Leclerc Loss. alpha=-2: Geman-McClure loss. alpha=0: Cauchy/Lortentzian loss. alpha=1: Charbonnier/pseudo-Huber loss. alpha=2: L2 loss. scale: The scale parameter of the loss. When |x| < scale, the loss is an L2-like quadratic bowl, and when |x| > scale the loss function takes on a different shape according to alpha. Returns: The losses for each element of x, in the same shape as x. """ eps = jnp.finfo(jnp.float32).eps # This will be used repeatedly. squared_scaled_x = squared_x / (scale ** 2) # The loss when alpha == 2. loss_two = 0.5 * squared_scaled_x # The loss when alpha == 0. loss_zero = log1p_safe(0.5 * squared_scaled_x) # The loss when alpha == -infinity. loss_neginf = -jnp.expm1(-0.5 * squared_scaled_x) # The loss when alpha == +infinity. loss_posinf = expm1_safe(0.5 * squared_scaled_x) # The loss when not in one of the above special cases. # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by. beta_safe = jnp.maximum(eps, jnp.abs(alpha - 2.)) # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by. alpha_safe = jnp.where( jnp.greater_equal(alpha, 0.), jnp.ones_like(alpha), -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha)) loss_otherwise = (beta_safe / alpha_safe) * ( jnp.power(squared_scaled_x / beta_safe + 1., 0.5 * alpha) - 1.) # Select which of the cases of the loss to return. 
loss = jnp.where( alpha == -jnp.inf, loss_neginf, jnp.where( alpha == 0, loss_zero, jnp.where( alpha == 2, loss_two, jnp.where(alpha == jnp.inf, loss_posinf, loss_otherwise)))) return loss def shard(xs, device_count=None): """Split data into shards for multiple devices along the first dimension.""" if device_count is None: jax.local_device_count() return jax.tree_map(lambda x: x.reshape((device_count, -1) + x.shape[1:]), xs) def to_device(xs): """Transfer data to devices (GPU/TPU).""" return jax.tree_map(jnp.array, xs) def unshard(x, padding=0): """Collect the sharded tensor to the shape before sharding.""" if padding > 0: return x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))[:-padding] else: return x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:])) def normalize(x): """Normalization helper function.""" return x / np.linalg.norm(x) def parallel_map(f, iterable, max_threads=None, show_pbar=False, **kwargs): """Parallel version of map().""" with futures.ThreadPoolExecutor(max_threads) as executor: if show_pbar: results = tqdm.tqdm( executor.map(f, iterable, **kwargs), total=len(iterable)) else: results = executor.map(f, iterable, **kwargs) return list(results) def strided_subset(sequence, count): """Returns a strided subset of a list.""" if count: stride = max(1, len(sequence) // count) return sequence[::stride] return sequence def tree_collate(list_of_pytrees): """Collates a list of pytrees with the same structure.""" return tree_util.tree_multimap(lambda *x: np.asarray(x), *list_of_pytrees) @contextlib.contextmanager def print_time(name): """Records the time elapsed.""" start = time.time() yield elapsed = time.time() - start print(f'[{name}] time elapsed: {elapsed:.04f}') class ValueMeter: """Tracks the average of a value.""" def __init__(self): self._values = [] def reset(self): """Resets the meter.""" self._values.clear() def update(self, value): """Adds a value to the meter.""" self._values.append(value) def reduce(self, reduction='mean'): """Reduces the tracked values.""" if reduction == 'mean': return np.mean(self._values) elif reduction == 'std': return np.std(self._values) elif reduction == 'last': return self._values[-1] else: raise ValueError(f'Unknown reduction {reduction}') class TimeTracker: """Tracks the average time elapsed over multiple steps.""" def __init__(self): self._meters = collections.defaultdict(ValueMeter) self._marked_time = collections.defaultdict(float) @contextlib.contextmanager def record_time(self, key: str): """Records the time elapsed.""" start = time.time() yield elapsed = time.time() - start self.update(key, elapsed) def update(self, key, value): """Updates the time value for a given key.""" self._meters[key].update(value) def tic(self, *args): """Marks the starting time of an event.""" for key in args: self._marked_time[key] = time.time() def toc(self, *args): """Records the time elapsed based on the previous call to `tic`.""" for key in args: self.update(key, time.time() - self._marked_time[key]) del self._marked_time[key] def reset(self): """Resets all time meters.""" for meter in self._meters.values(): meter.reset() def summary(self, reduction='mean'): """Returns a dictionary of reduced times.""" time_dict = {k: v.reduce(reduction) for k, v in self._meters.items()} if 'total' not in time_dict: time_dict['total'] = sum(time_dict.values()) time_dict['steps_per_sec'] = 1.0 / time_dict['total'] return time_dict def summary_str(self, reduction='mean'): """Returns a string of reduced times.""" strings = [f'{k}={v:.04f}' for k, v in 
self.summary(reduction).items()] return ', '.join(strings)
apache-2.0
2,626,745,283,115,632,600
29.55914
80
0.660591
false
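Editor's note on the shard() helper in the nerfies record above: as reproduced here, the device_count-is-None branch calls jax.local_device_count() without assigning the result, so the reshape on the next line would still see None. A minimal sketch with that assignment added (a hedged fix; everything else mirrors the function as shown):

import jax

def shard(xs, device_count=None):
    """Split data into shards for multiple devices along the first dimension."""
    if device_count is None:
        device_count = jax.local_device_count()  # assign the result; the record above drops it
    return jax.tree_map(lambda x: x.reshape((device_count, -1) + x.shape[1:]), xs)

For example, on a host with 2 local devices, sharding an array of shape (8, 3) this way yields an array of shape (2, 4, 3), one leading slice per device.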
infolock/My-Sublime
JSHint Gutter/JSHint.py
1
10626
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import sublime, sublime_plugin import os, sys, subprocess, codecs, re, webbrowser from threading import Timer try: import commands except ImportError: pass PLUGIN_FOLDER = os.path.dirname(os.path.realpath(__file__)) RC_FILE = ".jshintrc" SETTINGS_FILE = "JSHint.sublime-settings" KEYMAP_FILE = "Default ($PLATFORM).sublime-keymap" OUTPUT_VALID = b"*** JSHint output ***" class JshintCommand(sublime_plugin.TextCommand): def run(self, edit, show_regions=True, show_panel=True): # Make sure we're only linting javascript files. if self.file_unsupported(): return # Get the current text in the buffer and save it in a temporary file. # This allows for scratch buffers and dirty files to be linted as well. temp_file_path = self.save_buffer_to_temp_file() output = self.run_script_on_file(temp_file_path) os.remove(temp_file_path) # Dump any diagnostics and get the output after the identification marker. if PluginUtils.get_pref('print_diagnostics'): print(self.get_output_diagnostics(output)) output = self.get_output_data(output) # We're done with linting, rebuild the regions shown in the current view. JshintGlobalStore.reset() JshintEventListeners.reset() self.view.erase_regions("jshint_errors") regions = [] menuitems = [] # For each line of jshint output (errors, warnings etc.) add a region # in the view and a menuitem in a quick panel. for line in output.splitlines(): try: line_no, column_no, description = line.split(" :: ") except: continue symbol_name = re.match("('[^']+')", description) hint_point = self.view.text_point(int(line_no) - 1, int(column_no) - 1) if symbol_name: hint_region = self.view.word(hint_point) else: hint_region = self.view.line(hint_point) regions.append(hint_region) menuitems.append(line_no + ":" + column_no + " " + description) JshintGlobalStore.errors.append((hint_region, description)) if show_regions: self.add_regions(regions) if show_panel: self.view.window().show_quick_panel(menuitems, self.on_quick_panel_selection) def file_unsupported(self): file_path = self.view.file_name() view_settings = self.view.settings() has_js_extension = file_path != None and bool(re.search(r'\.jsm?$', file_path)) has_js_syntax = bool(re.search("JavaScript", view_settings.get("syntax"), re.I)) has_json_syntax = bool(re.search("JSON", view_settings.get("syntax"), re.I)) return has_json_syntax or (not has_js_extension and not has_js_syntax) def save_buffer_to_temp_file(self): buffer_text = self.view.substr(sublime.Region(0, self.view.size())) temp_file_name = ".__temp__" temp_file_path = PLUGIN_FOLDER + "/" + temp_file_name f = codecs.open(temp_file_path, mode="w", encoding="utf-8") f.write(buffer_text) f.close() return temp_file_path def run_script_on_file(self, temp_file_path): try: node_path = PluginUtils.get_node_path() script_path = PLUGIN_FOLDER + "/scripts/run.js" file_path = self.view.file_name() cmd = [node_path, script_path, temp_file_path, file_path or "?"] output = PluginUtils.get_output(cmd) # Make sure the correct/expected output is retrieved. if output.find(OUTPUT_VALID) != -1: return output msg = "Command " + '" "'.join(cmd) + " created invalid output." print(output) raise Exception(msg) except: # Something bad happened. print("Unexpected error({0}): {1}".format(sys.exc_info()[0], sys.exc_info()[1])) # Usually, it's just node.js not being found. Try to alleviate the issue. 
msg = "Node.js was not found in the default path. Please specify the location." if not sublime.ok_cancel_dialog(msg): msg = "You won't be able to use this plugin without specifying the path to node.js." sublime.error_message(msg) else: PluginUtils.open_sublime_settings(self.view.window()) def get_output_diagnostics(self, output): index = output.find(OUTPUT_VALID) return output[:index].decode("utf-8") def get_output_data(self, output): index = output.find(OUTPUT_VALID) return output[index + len(OUTPUT_VALID) + 1:].decode("utf-8") def add_regions(self, regions): package_name = (PLUGIN_FOLDER.split(os.path.sep))[-1] if int(sublime.version()) >= 3000: icon = "Packages/" + package_name + "/warning.png" self.view.add_regions("jshint_errors", regions, "keyword", icon, sublime.DRAW_EMPTY | sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_SQUIGGLY_UNDERLINE) else: icon = ".." + os.path.sep + package_name + os.path.sep + "warning" self.view.add_regions("jshint_errors", regions, "keyword", icon, sublime.DRAW_EMPTY | sublime.DRAW_OUTLINED) def on_quick_panel_selection(self, index): if index == -1: return # Focus the user requested region from the quick panel. region = JshintGlobalStore.errors[index][0] region_cursor = sublime.Region(region.begin(), region.begin()) selection = self.view.sel() selection.clear() selection.add(region_cursor) self.view.show(region_cursor) if not PluginUtils.get_pref("highlight_selected_regions"): return self.view.erase_regions("jshint_selected") self.view.add_regions("jshint_selected", [region], "meta") class JshintGlobalStore: errors = [] @classmethod def reset(self): self.errors = [] class JshintEventListeners(sublime_plugin.EventListener): timer = None @classmethod def reset(self): # Invalidate any previously set timer. if self.timer != None: self.timer.cancel() self.timer = None @classmethod def on_modified(self, view): # Continue only if the plugin settings allow this to happen. # This is only available in Sublime 3. if int(sublime.version()) < 3000: return if not PluginUtils.get_pref("lint_on_edit"): return # Re-run the jshint command after a second of inactivity after the view # has been modified, to avoid regions getting out of sync with the actual # previously linted source code. self.reset() timeout = PluginUtils.get_pref("lint_on_edit_timeout") self.timer = Timer(timeout, lambda: view.window().run_command("jshint", { "show_panel": False })) self.timer.start() @staticmethod def on_post_save(view): # Continue only if the current plugin settings allow this to happen. if PluginUtils.get_pref("lint_on_save"): view.window().run_command("jshint", { "show_panel": False }) @staticmethod def on_load(view): # Continue only if the current plugin settings allow this to happen. 
if PluginUtils.get_pref("lint_on_load"): v = view.window() if int(sublime.version()) < 3000 else view v.run_command("jshint", { "show_panel": False }) @staticmethod def on_selection_modified(view): caret_region = view.sel()[0] for message_region, message_text in JshintGlobalStore.errors: if message_region.intersects(caret_region): sublime.status_message(message_text) return else: sublime.status_message("") class JshintSetLintingPrefsCommand(sublime_plugin.TextCommand): def run(self, edit): PluginUtils.open_config_rc(self.view.window()) class JshintSetPluginOptionsCommand(sublime_plugin.TextCommand): def run(self, edit): PluginUtils.open_sublime_settings(self.view.window()) class JshintSetKeyboardShortcutsCommand(sublime_plugin.TextCommand): def run(self, edit): PluginUtils.open_sublime_keymap(self.view.window(), { "windows": "Windows", "linux": "Linux", "osx": "OSX" }.get(sublime.platform())) class JshintSetNodePathCommand(sublime_plugin.TextCommand): def run(self, edit): PluginUtils.open_sublime_settings(self.view.window()) class JshintClearAnnotationsCommand(sublime_plugin.TextCommand): def run(self, edit): JshintEventListeners.reset() self.view.erase_regions("jshint_errors") self.view.erase_regions("jshint_selected") class PluginUtils: @staticmethod def get_pref(key): return sublime.load_settings(SETTINGS_FILE).get(key) @staticmethod def open_config_rc(window): window.open_file(PLUGIN_FOLDER + "/" + RC_FILE) @staticmethod def open_sublime_settings(window): window.open_file(PLUGIN_FOLDER + "/" + SETTINGS_FILE) @staticmethod def open_sublime_keymap(window, platform): window.open_file(PLUGIN_FOLDER + "/" + KEYMAP_FILE.replace("$PLATFORM", platform)) @staticmethod def exists_in_path(cmd): # Can't search the path if a directory is specified. assert not os.path.dirname(cmd) path = os.environ.get("PATH", "").split(os.pathsep) extensions = os.environ.get("PATHEXT", "").split(os.pathsep) # For each directory in PATH, check if it contains the specified binary. for directory in path: base = os.path.join(directory, cmd) options = [base] + [(base + ext) for ext in extensions] for filename in options: if os.path.exists(filename): return True return False @staticmethod def get_node_path(): # Simply using `node` without specifying a path sometimes doesn't work :( if PluginUtils.exists_in_path("nodejs"): return "nodejs" elif PluginUtils.exists_in_path("node"): return "node" else: platform = sublime.platform() node = PluginUtils.get_pref("node_path").get(platform) print("Using node.js path on '" + platform + "': " + node) return node @staticmethod def get_output(cmd): if int(sublime.version()) < 3000: if sublime.platform() != "windows": # Handle Linux and OS X in Python 2. run = '"' + '" "'.join(cmd) + '"' return commands.getoutput(run) else: # Handle Windows in Python 2. # Prevent console window from showing. startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW return subprocess.Popen(cmd, \ stdout=subprocess.PIPE, \ startupinfo=startupinfo).communicate()[0] else: # Handle all OS in Python 3. run = '"' + '" "'.join(cmd) + '"' return subprocess.check_output(run, stderr=subprocess.STDOUT, shell=True, env=os.environ)
mit
-8,686,644,277,519,501,000
33.72549
101
0.665443
false
Nithanaroy/SemanticMachineTranslation
py-aligner/align.py
1
1079
import nltk import os from nltk.corpus.util import LazyCorpusLoader from nltk.corpus.europarl_raw import german, english from nltk.corpus.reader import AlignedCorpusReader from nltk.translate import AlignedSent, Alignment from nltk.corpus.reader.plaintext import PlaintextCorpusReader def align(filename): files = filename.split('(') ripe_file = os.path.abspath(files[1]) raw_file = os.path.abspath(files[0]) raw_for_nltk = os.path.abspath('data/newcorpus/source.txt') with open(files[0]) as f: with open(raw_for_nltk,"w") as f1: for line in f: f1.write(line) corpusdir = 'data/newcorpus/' newcorpus = PlaintextCorpusReader(corpusdir, '.*',sent_tokenizer=nltk.data.LazyLoader('tokenizers/punkt/german.pickle')) out = open(ripe_file, "w") i = 0 temp =[] temp.append(newcorpus.sents(raw_for_nltk)) tempVal = str(temp[i]) tempVal = tempVal.replace(",", "") tempVal = tempVal.replace("u'", "") tempVal = tempVal.replace("'", "") tempVal = tempVal.replace("[", "") tempVal = tempVal.replace("]", "") out.write(tempVal+os.linesep) out.close() return
mit
3,444,761,004,346,962,400
28.162162
121
0.716404
false
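The chained replace() calls inside align() in the record above strip the commas, quotes and brackets that str() adds around the tokenized sentence list before the result is written out. An equivalent, more direct sketch of that cleanup step (a hypothetical helper, not part of the original file; it reproduces the same character stripping with a single regular expression):

import re

def clean_sentence_repr(sentences):
    # Same effect as the sequential replace() calls above: drop the u'...' quoting,
    # commas and square brackets that str() puts around a list of token lists.
    return re.sub(r"u'|'|,|\[|\]", "", str(sentences))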
benkhlifafahmi/funnycode
bot/fb_happy_eid/fb_bot.py
1
1999
import re from bs4 import * import mechanize def login(br): user = "[YOUR LOGIN]" pwd = "[YOUR PASS]" br.open('https://m.facebook.com/') br.select_form(nr=0) br.form['email'] = user br.form['pass'] = pwd br.submit() return br def get_friends_list(br): res = [] i = 1 while(True): br.open('https://m.facebook.com/friends/center/friends/?ppk=%d&tid=u_o_0'%i) html_page = br.response().read() soup_page = BeautifulSoup(html_page, "lxml") friends = soup_page.find_all('a',{'class':'bn'}) if friends == [] or friends == None: break for f in friends: link = f['href'] uname = f.text url= r'uid=\S+&' p = re.compile(url) uid = p.findall(link) if len(uid)>0: uid = uid[0].replace('uid=','')[:-1] res.append({ 'uname':uname, 'id': uid, 'message':'Hi %s, I wish for you a happy Eid, (This message was sent from my bot you can download it via : https://github.com/benkhlifafahmi/funnycode/tree/master/bot/fb_happy_eid ) Have a nice day.'%(uname) }) i+=1 print "Getting Friend page number : %s"%i return res def send_message(br,friend): br.open('https://m.facebook.com/messages/read/?fbid=1') br._factory.is_html = True br.select_form(predicate=lambda frm: 'id' in frm.attrs and frm.attrs['id'] == 'composer_form') br.form.new_control('text','ids[0]',{}) br['ids[0]']= friend['id'] br['body'] = friend['message'].encode('utf-8') br.submit() print "Message sent to %s with id : %s"%(friend['uname'],friend['id']) return br def main(): br = mechanize.Browser() br.set_handle_robots(False) br._factory.is_html = True br.addheaders = [('User-agent','Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/45.0.2454101')] br = login(br) friend_list = get_friends_list(br) for friend in friend_list: try: br = send_message(br, friend) except Exception as e: print "Failed to send Message to %s with exception : %s"%(friend['uname'],str(e)) if __name__ == '__main__': main()
mit
5,665,094,342,824,357,000
29.287879
212
0.635818
false
nschloe/meshio
src/meshio/xdmf/common.py
1
5448
import numpy as np from .._exceptions import ReadError from .._mesh import CellBlock numpy_to_xdmf_dtype = { "int8": ("Int", "1"), "int16": ("Int", "2"), "int32": ("Int", "4"), "int64": ("Int", "8"), "uint8": ("UInt", "1"), "uint16": ("UInt", "2"), "uint32": ("UInt", "4"), "uint64": ("UInt", "8"), "float32": ("Float", "4"), "float64": ("Float", "8"), } xdmf_to_numpy_type = {v: k for k, v in numpy_to_xdmf_dtype.items()} dtype_to_format_string = { "int32": "%d", "int64": "%d", "uint32": "%d", "uint64": "%d", "float32": "%.7e", "float64": "%.16e", } # See # <https://www.xdmf.org/index.php/XDMF_Model_and_Format#XML_Element_.28Xdmf_ClassName.29_and_Default_XML_Attributes> # <https://gitlab.kitware.com/xdmf/xdmf/blob/master/Xdmf.dtd#L34> # for XDMF types. # There appears to be no particular consistency, so allow for different # alternatives as well. meshio_to_xdmf_type = { "vertex": ["Polyvertex"], "line": ["Polyline"], "line3": ["Edge_3"], "quad": ["Quadrilateral"], "quad8": ["Quadrilateral_8", "Quad_8"], "quad9": ["Quadrilateral_9", "Quad_9"], "pyramid": ["Pyramid"], "pyramid13": ["Pyramid_13"], "tetra": ["Tetrahedron"], "triangle": ["Triangle"], "triangle6": ["Triangle_6", "Tri_6"], "tetra10": ["Tetrahedron_10", "Tet_10"], "wedge": ["Wedge"], "wedge15": ["Wedge_15"], "wedge18": ["Wedge_18"], "hexahedron": ["Hexahedron"], "hexahedron20": ["Hexahedron_20", "Hex_20"], "hexahedron24": ["Hexahedron_24", "Hex_24"], "hexahedron27": ["Hexahedron_27", "Hex_27"], "hexahedron64": ["Hexahedron_64", "Hex_64"], "hexahedron125": ["Hexahedron_125", "Hex_125"], "hexahedron216": ["Hexahedron_216", "Hex_216"], "hexahedron343": ["Hexahedron_343", "Hex_343"], "hexahedron512": ["Hexahedron_512", "Hex_512"], "hexahedron729": ["Hexahedron_729", "Hex_729"], "hexahedron1000": ["Hexahedron_1000", "Hex_100"], "hexahedron1331": ["Hexahedron_1331", "Hex_1331"], } xdmf_to_meshio_type = {v: k for k, vals in meshio_to_xdmf_type.items() for v in vals} # Check out # <https://gitlab.kitware.com/xdmf/xdmf/blob/master/XdmfTopologyType.cpp> # for the list of indices. xdmf_idx_to_meshio_type = { 0x1: "vertex", 0x2: "line", 0x4: "triangle", 0x5: "quad", 0x6: "tetra", 0x7: "pyramid", 0x8: "wedge", 0x9: "hexahedron", 0x22: "line3", 0x23: "quad9", 0x24: "triangle6", 0x25: "quad8", 0x26: "tetra10", 0x27: "pyramid13", 0x28: "wedge15", 0x29: "wedge18", 0x30: "hexahedron20", 0x31: "hexahedron24", 0x32: "hexahedron27", 0x33: "hexahedron64", 0x34: "hexahedron125", 0x35: "hexahedron216", 0x36: "hexahedron343", 0x37: "hexahedron512", 0x38: "hexahedron729", 0x39: "hexahedron1000", 0x40: "hexahedron1331", # 0x41: 'hexahedron_spectral_64', # 0x42: 'hexahedron_spectral_125', # 0x43: 'hexahedron_spectral_216', # 0x44: 'hexahedron_spectral_343', # 0x45: 'hexahedron_spectral_512', # 0x46: 'hexahedron_spectral_729', # 0x47: 'hexahedron_spectral_1000', # 0x48: 'hexahedron_spectral_1331', } meshio_type_to_xdmf_index = {v: k for k, v in xdmf_idx_to_meshio_type.items()} def translate_mixed_cells(data): # Translate it into the cells dictionary. # `data` is a one-dimensional vector with # (cell_type1, p0, p1, ... ,pk, cell_type2, p10, p11, ..., p1k, ... 
# https://www.xdmf.org/index.php/XDMF_Model_and_Format#Arbitrary # https://gitlab.kitware.com/xdmf/xdmf/blob/master/XdmfTopologyType.hpp#L394 xdmf_idx_to_num_nodes = { 1: 1, # vertex 2: 2, # line 4: 3, # triangle 5: 4, # quad 6: 4, # tet 7: 5, # pyramid 8: 6, # wedge 9: 8, # hex 11: 6, # triangle6 } # collect types and offsets types = [] offsets = [] r = 0 while r < len(data): xdmf_type = data[r] types.append(xdmf_type) offsets.append(r) if xdmf_type == 2: # line if data[r + 1] != 2: # polyline raise ReadError("XDMF reader: Only supports 2-point lines for now") r += 1 r += 1 r += xdmf_idx_to_num_nodes[xdmf_type] types = np.array(types) offsets = np.array(offsets) b = np.concatenate([[0], np.where(types[:-1] != types[1:])[0] + 1, [len(types)]]) cells = [] for start, end in zip(b[:-1], b[1:]): meshio_type = xdmf_idx_to_meshio_type[types[start]] n = xdmf_idx_to_num_nodes[types[start]] point_offsets = offsets[start:end] + (2 if types[start] == 2 else 1) indices = np.array([np.arange(n) + o for o in point_offsets]) cells.append(CellBlock(meshio_type, data[indices])) return cells def attribute_type(data): # <https://www.xdmf.org/index.php/XDMF_Model_and_Format#Attribute> if len(data.shape) == 1 or (len(data.shape) == 2 and data.shape[1] == 1): return "Scalar" elif len(data.shape) == 2 and data.shape[1] in [2, 3]: return "Vector" elif (len(data.shape) == 2 and data.shape[1] == 9) or ( len(data.shape) == 3 and data.shape[1] == 3 and data.shape[2] == 3 ): return "Tensor" elif len(data.shape) == 2 and data.shape[1] == 6: return "Tensor6" if len(data.shape) != 3: raise ReadError() return "Matrix"
mit
3,061,314,807,218,892,300
30.131429
116
0.570668
false
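A small illustration of the flat mixed-topology layout that translate_mixed_cells() in the meshio record above decodes: the array is a sequence of cells, each a type code followed by its point ids, with 2-point lines additionally carrying the polyline vertex count right after the type code. The import path below assumes the src/meshio/xdmf/common.py layout shown in the record:

import numpy as np
from meshio.xdmf.common import translate_mixed_cells  # assumed import path

# type 4 = triangle (3 point ids follow), type 2 = line (a vertex count of 2, then 2 point ids)
data = np.array([
    4, 0, 1, 2,   # triangle (0, 1, 2)
    4, 0, 2, 3,   # triangle (0, 2, 3)
    2, 2, 3, 4,   # line (3, 4)
])
cells = translate_mixed_cells(data)
# -> one "triangle" block containing [[0, 1, 2], [0, 2, 3]] and one "line" block containing [[3, 4]]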
emlprime/fantasyx
fantasyx/game.py
1
10240
import json from models import Character, User, Draft, DraftTicket, Episode, DraftHistory, Rubric, Score from sqlalchemy import or_, exc from sqlalchemy.orm import lazyload from datetime import timedelta, datetime import pandas as pd import numpy as np def handle_event(msg_type, msg, db_session=None): print("handling %s" % msg_type) handlers = { "DRAFT": draft, "RELEASE": release, "UPDATE_USER": update_user, } print "msg_type: %s" % msg if msg_type in handlers.keys(): response = handlers[msg_type](msg, db_session) else: response = {"error": "no handler implemented for %s" % msg_type} return response def initial_data(user_identifier, db_session): rubric = format_rubric(db_session) characters = format_characters(db_session) scores = format_scores(db_session) owners = format_owners(db_session) user_data = format_user_data(user_identifier, db_session) can_draft = format_can_draft(user_identifier,db_session) can_release = format_can_release(user_identifier,db_session) return [rubric, characters, scores, owners, can_draft, user_data] def format_owners(db_session): result = db_session.query(User).order_by(User.name).values(User.name) owners = [{"username": user[0]} for user in result] return {"type": "OWNERS", "owners": owners} # The full list of characters def format_rubric(db_session): rubric = {} result = db_session.query(Rubric).order_by(Rubric.kind, Rubric.points).values(Rubric.description, Rubric.kind, Rubric.points, Rubric.canon) for row in result: description, kind, points, canon = row if not kind in rubric.keys(): rubric[kind] = [] rubric[kind].append({"description":description, "points":points, "kind": canon}) return {"type": "RUBRIC", "rubric": [{"title": title, "data": data} for title, data in rubric.items()]} def format_characters(db_session): result = db_session.query(Character).outerjoin(Draft).outerjoin(User).order_by(Character.name).values(Character.id, Character.name, Character.description, User.name) return {"type": "CHARACTERS", "characters": [{"id": item[0], "name": item[1], "description": item[2], "user": item[3]} for item in result if item[0]]} # user details for the front end display def format_user_data(user_identifier, db_session): print "selecting user for user identifier: %s" % (user_identifier) user = db_session.query(User).filter(User.identifier == user_identifier).first() return {"type": "USER_DATA", "user_data": {"email": user.email, "username": user.name, "seat_of_power": user.seat_of_power, "house_words": user.house_words}} def format_scores(db_session): query = db_session.query(Score).outerjoin(Character).options(lazyload('rubric')) raw_report = query.order_by(Score.episode_number, Character.name).all() scores = [{ "id": score.id, "episode_number": score.episode_number, "owner": score.user.name if score.user else "", "notes": score.notes, "points": score.points, "bonus": score.bonus, "score": score.points + (score.bonus or 0), "kind": score.rubric.kind, "canon": score.rubric.canon, "description": score.rubric.description, "character_name": score.character.name, } for score in raw_report] return {"type":"SCORES", "scores": scores} # action to draft character from available characters def draft(data, db_session): user_identifier = data["data"]["user_identifier"] character_id = data["data"]["character_id"] user = db_session.query(User).filter(User.identifier == user_identifier).first() if not user: raise Exception("No user found with identifier %s" % user_identifier) if not user.can_draft(db_session): return {"can_draft": False} draft_ticket = 
user.next_draft_ticket(db_session) if draft_ticket: if not draft_ticket.user_identifier == user_identifier: raise Exception("It is not %s's turn to draft" % user_identifier) character = db_session.query(Character).outerjoin(Draft).filter(Draft.id == None).filter(Character.id == character_id).first() result = "Drafting %s for %s" % (character, user_identifier) user.draft(character) draft_history = { "character_id": character.id, "user_id": user.id } db_session.execute(DraftHistory.__table__.insert(), draft_history) if draft_ticket: db_session.delete(draft_ticket) db_session.commit() db_session.query(Character).outerjoin(Draft).filter(Draft.id == None).values(Character.id, Character.name) return "%s drafted %s" % (user.name, character.name) # action to release a character from the draft for this user_identifier def release(data, db_session): user_identifier = data["data"]["user_identifier"] character_id = data["data"]["character_id"] user = db_session.query(User).filter(User.identifier == user_identifier).first() if not user: raise Exception("No user found with identifier %s" % user_identifier) character = db_session.query(Character).filter(Character.id == character_id).first() user.release(character) draft_history_ids = db_session.query(User).outerjoin(DraftHistory).outerjoin(Character).filter(User.name==user.name, Character.name==character.name).values(DraftHistory.id) for draft_history_id in draft_history_ids: draft_history = db_session.query(DraftHistory).filter(DraftHistory.id == draft_history_id).update({"released_at":datetime.now()}) db_session.commit() return "%s released %s" % (user.name, character.name) def format_can_draft(user_identifier, db_session): user = db_session.query(User).filter(User.identifier == user_identifier).first() if not user: raise Exception("No user found with identifier %s" % user_identifier) return {"type": "CAN_DRAFT", "can_draft": user.can_draft(db_session)} def format_can_release(user_identifier, db_session): user = db_session.query(User).filter(User.identifier == user_identifier).first() if not user: raise Exception("No user found with identifier %s" % user_identifier) return {"type": "CAN_RELEASE", "can_release": user.can_release(db_session)} def generate_score(msg, db_session): # Get the character from the unique character name character_name = msg['character_name'] character = db_session.query(Character).filter(Character.name == character_name).first() if not character: db_session.execute(Character.__table__.insert(), [{"name": character_name}]) character = db_session.query(Character).filter(Character.name == character_name).first() # get the episode from the unique episode number episode_number = msg['episode_number'] episode = db_session.query(Episode).filter(Episode.number == episode_number).first() # get a draft from the draft history. This is a historically idempotent approach # ideally we should be able to clear and regenerate the scores at any time based on the draft history data. 
This depends upon the assumption that no drafts can be overlapping draft_history = db_session.query(DraftHistory).join(Character).filter( Character.id == character.id, DraftHistory.drafted_at < (episode.aired_at - timedelta(hours=4)), (or_(DraftHistory.released_at == None, DraftHistory.released_at > episode.aired_at)) ).first() # If we found a draft, populate the score with the relevant user information if draft_history: user = draft_history.user user_id = user.id draft_id = draft_history.id # if we don't find a draft, still create the score, but don't associate it with a # user this gives us a sense of the "points on the table" that were left because # nobody had that character drafted at the time. else: user_id = None draft_id = None # specify the description, but apply points from the rubric # this depends on the assumption that the rubric doesn't change once the # game is in motion. If the rubric changes rubric_description = msg['rubric_description'] rubric = db_session.query(Rubric).filter(Rubric.description == rubric_description).first() if not rubric: db_session.execute(Rubric.__table__.insert(), [{"description": rubric_description, "points": msg['points'] or 0, "canon": "altfacts"}]) rubric = db_session.query(Rubric).filter(Rubric.description == rubric_description).first() # bonus can be applied if specified. If there is no bonus, make it 0 for easier summing bonus = int(msg['bonus'] or 0) # notes are to explain why specifically they got this. notes = msg['notes'] score_config = { "character_id": character.id, "draft_id": draft_id, "user_id": user_id, "episode_number": episode.number, "rubric_id": rubric.id, "points": rubric.points, "bonus": bonus, "notes": notes, } db_session.execute(Score.__table__.insert(), score_config) db_session.commit() def update_user(msg, db_session): data = { "name": msg["data"]["username"], "seat_of_power": msg["data"]["seat_of_power"], "house_words": msg["data"]["house_words"], } print data try: db_session.query(User).filter(User.name == data['name']).update(data) db_session.commit() # return { # "notify": "User %s updated" % data["name"], # "user_data": {"username": data['name'], "seat_of_power": data['seat_of_power'], "house_words": data['house_words']}, # } except exc.InternalError, exception: reason = exception.message print "Failed because: %s" % reason db_session.rollback() return {"notify": "User %s failed to update because %s" % (data["name"], reason)} except exc.IntegrityError, exception: reason = exception.message print "Failed because: %s" % reason db_session.rollback() return {"notify": "User %s failed to update because %s" % (data["name"], reason)}
mit
-8,171,953,140,849,885,000
43.329004
178
0.660742
false
gomezgoiri/reusingWebActuatorsFromSemanticSpace
actuation/scenarios/mixed_space_rest.py
1
2861
# -*- coding: utf-8 -*- ''' Copyright (C) 2013 onwards University of Deusto All rights reserved. This software is licensed as described in the file COPYING, which you should have received as part of this distribution. This software consists of contributions made by many individuals, listed below: @author: Aitor Gómez Goiri <[email protected]> ''' from actuation.proofs.reason import EyeReasoner from actuation.scenarios.abstract import AbstractSimulation, main from actuation.impl.space import CoordinationSpace from actuation.impl.rest.lamp.provider import LampProviderRESTMock from actuation.impl.space.lamp.mock.consumer import LampConsumerSpaceMock from actuation.impl.mix import IntermediaryAgent from actuation.impl.rest.mock.discovery import MockDiscovery class OnlySpaceBasedDevicesSimulator(AbstractSimulation): def __init__(self, input_folder, output_folder, path_to_reasoner, num_providers, debug = False): super(OnlySpaceBasedDevicesSimulator, self).__init__( output_folder ) self.input_folder = input_folder self.reasoner = EyeReasoner( path_to_reasoner ) @property def lc(self): return self.nodes["consumer"] @lc.setter def lc(self, value): self.nodes["consumer"] = value @property def lp(self): return self.nodes["provider"] @lp.setter def lp(self, value): self.nodes["provider"] = value def configure(self): debug = True discovery = MockDiscovery() self.space = CoordinationSpace("mixedSpace") self.nodes["agent"] = IntermediaryAgent( self.space, self.input_folder+"mix/", self.output_folder, self.reasoner, discovery) self.lp = LampProviderRESTMock( self.input_folder + "rest/", self.output_folder) self.lc = LampConsumerSpaceMock( self.space, self.input_folder + "space/", self.output_folder, debug = debug ) discovery.add_discovered( self.lp, "example.org") def execute(self): """ Executes the scenario with a REST provider and a consumer using a space. """ light_value = 30 self.lc.subscribe_to_result(light_value) self.lc.write_task(light_value) def check(self): rsc = self.lp.get_resource("/lamp/actuators/light/2/") # TODO check that the value of this resource is the desired one (manually checked) return rsc is not None if __name__ == '__main__': main( OnlySpaceBasedDevicesSimulator )
apache-2.0
9,016,202,217,310,364,000
34.320988
100
0.603147
false
spatialaudio/panorama
Python/python_core_components/ptfile_creator.py
1
1819
class PtCreator: ''' Create the PTSticher-input-file''' def __init__(self, output_dir, a, b, c): self.output_dir = output_dir # Disortion a, b, c self.a = a self.b = b self.c = c def init_file(self): # return first block of PTSticher-file string = '# PTStitcher script, written by hugin' + '\r\n' string += '\r\n' + 'p f2 w3000 h1500 v340 n"TIFF_m c:LZW r:CROP"' string += '\r\n' + 'm g1 i0 f0 m2 p0.00784314' + '\r\n' string += '\r\n' + '# output image lines' string += '\r\n' return string def create_oline(self, vertical_pos, horizontal_pos, angle, filename): ''' Create o-Lines. A o-Line contains the absolute position of one picture in our panorama [ in degree ! ]. We need one o-Line for each picture. ''' line = 'o w1045 h697 f0 TrX0 TrY0 TrZ0' line += ' a' + str(self.a) + ' b' + str(self.b) + ' c' + str(self.c) line += ' d0 e0 g0' line += ' p' + str(vertical_pos) + ' r0 t0 v' + \ str(angle) + ' y' + str(horizontal_pos) line += ' n"' + str(filename) + '"\r\n' return line def create_file(self, li_vertical, li_horizontal, angle): ''' Return the complete pt_file ''' pt_file = self.init_file() li_prefix = list(map(chr, range(97, 123))) for index_v, pos_v in enumerate(li_vertical): prefix = li_prefix[index_v] for index_h, pos_h in enumerate(li_horizontal): pic = prefix + "_" + str(index_h) + ".jpg" pt_file += self.create_oline(-pos_v, round((360.0 - pos_h) % 360.0, 2), angle, pic) pt_file += '\r\n' return pt_file
mit
-5,853,065,582,672,730,000
30.362069
91
0.503024
false
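A minimal usage sketch for the PtCreator class in the panorama record above, using only the methods defined there; the module name, distortion coefficients and shooting positions are illustrative assumptions:

from ptfile_creator import PtCreator  # module name assumed from the path above

# Three vertical rows and four horizontal positions -> pictures named a_0.jpg ... c_3.jpg,
# matching the li_prefix scheme in create_file().
creator = PtCreator(output_dir="out", a=0.0, b=-0.05, c=0.0)
script = creator.create_file(li_vertical=[-30.0, 0.0, 30.0],
                             li_horizontal=[0.0, 90.0, 180.0, 270.0],
                             angle=60)
with open("panorama.pts", "w") as fh:
    fh.write(script)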
ChameleonCloud/horizon
openstack_dashboard/dashboards/project/floating_ips/views.py
1
5125
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Views for managing floating IPs.
"""

from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _

from neutronclient.common import exceptions as neutron_exc

from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon import workflows

from openstack_dashboard import api
from openstack_dashboard.usage import quotas

from openstack_dashboard.dashboards.project.floating_ips \
    import forms as project_forms
from openstack_dashboard.dashboards.project.floating_ips \
    import tables as project_tables
from openstack_dashboard.dashboards.project.floating_ips \
    import workflows as project_workflows


class AssociateView(workflows.WorkflowView):
    workflow_class = project_workflows.IPAssociationWorkflow


class AllocateView(forms.ModalFormView):
    form_class = project_forms.FloatingIpAllocate
    form_id = "associate_floating_ip_form"
    page_title = _("Allocate Floating IP")
    template_name = 'project/floating_ips/allocate.html'
    submit_label = _("Allocate IP")
    submit_url = reverse_lazy("horizon:project:floating_ips:allocate")
    success_url = reverse_lazy('horizon:project:floating_ips:index')

    def get_object_display(self, obj):
        return obj.ip

    def get_context_data(self, **kwargs):
        context = super(AllocateView, self).get_context_data(**kwargs)
        try:
            context['usages'] = quotas.tenant_quota_usages(
                self.request, targets=('floatingip', )
            )
        except Exception:
            exceptions.handle(self.request)
        return context

    def get_initial(self):
        try:
            pools = api.neutron.floating_ip_pools_list(self.request)
        except neutron_exc.ConnectionFailed:
            pools = []
            exceptions.handle(self.request)
        except Exception:
            pools = []
            exceptions.handle(self.request,
                              _("Unable to retrieve floating IP pools."))
        pool_list = [(pool.id, pool.name) for pool in pools]
        if not pool_list:
            pool_list = [(None, _("No floating IP pools available"))]
        return {'pool_list': pool_list}


class IndexView(tables.DataTableView):
    table_class = project_tables.FloatingIPsTable
    page_title = _("Floating IPs")

    def get_data(self):
        try:
            search_opts = self.get_filters()
            floating_ips = api.neutron.tenant_floating_ip_list(
                self.request, **search_opts)
        except neutron_exc.ConnectionFailed:
            floating_ips = []
            exceptions.handle(self.request)
        except Exception:
            floating_ips = []
            exceptions.handle(self.request,
                              _('Unable to retrieve floating IP addresses.'))

        try:
            floating_ip_pools = \
                api.neutron.floating_ip_pools_list(self.request)
        except neutron_exc.ConnectionFailed:
            floating_ip_pools = []
            exceptions.handle(self.request)
        except Exception:
            floating_ip_pools = []
            exceptions.handle(self.request,
                              _('Unable to retrieve floating IP pools.'))

        pool_dict = dict((obj.id, obj.name) for obj in floating_ip_pools)

        attached_instance_ids = [ip.instance_id for ip in floating_ips
                                 if ip.instance_id is not None]
        instances_dict = {}
        if attached_instance_ids:
            instances = []
            try:
                # TODO(tsufiev): we should pass attached_instance_ids to
                # nova.server_list as soon as Nova API allows for this
                instances, has_more = api.nova.server_list(self.request,
                                                           detailed=False)
            except Exception:
                exceptions.handle(self.request,
                                  _('Unable to retrieve instance list.'))

            instances_dict = dict((obj.id, obj.name) for obj in instances)

        for ip in floating_ips:
            ip.instance_name = instances_dict.get(ip.instance_id)
            ip.pool_name = pool_dict.get(ip.pool, ip.pool)

        return floating_ips
apache-2.0
-6,416,642,596,869,219,000
36.683824
78
0.626927
false
glyphobet/pottymouth
python/setup.py
1
2371
#!/usr/bin/env python

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

from pottymouth import __version__

import os
import os.path
import shutil

setup(name='PottyMouth',
      py_modules=['pottymouth'],
      version=__version__,
      data_files=[('share/doc/python-pottymouth',
                   ['readme.html',
                    'LICENSE.txt',
                    'tests.py',
                    'web.py',
                    'profile.py',
                    ]),
                  ],
      # metadata for fun
      author='Matt Chisholm',
      author_email='[email protected]',
      description="transform unstructured, untrusted text to safe, valid XHTML",
      license='BSD License',
      keywords='wiki',
      url='http://glyphobet.net/pottymouth',
      download_url='http://glyphobet.net/pottymouth/dist/',
      long_description="""PottyMouth transforms completely unstructured and untrusted text to valid, nice-looking, completely safe XHTML. PottyMouth is designed to handle input text from non-technical, potentially careless or malicious users. It produces HTML that is completely safe, programmatically and visually, to include on any web page. And you don't need to make your users read any instructions before they start typing. They don't even need to know that PottyMouth is being used.""",
      classifiers=['Development Status :: 5 - Production/Stable',
                   'Intended Audience :: Developers',
                   'License :: OSI Approved :: BSD License',
                   'Operating System :: OS Independent',
                   'Environment :: Web Environment',
                   'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
                   'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: News/Diary',
                   'Programming Language :: Python :: 2.6',
                   'Programming Language :: Python :: 2.7',
                   'Programming Language :: Python :: 3.3',
                   'Programming Language :: Python :: Implementation',
                   'Programming Language :: Python :: Implementation :: CPython',
                   'Programming Language :: Python :: Implementation :: PyPy',
                   ],
      platforms='All',
      )
bsd-3-clause
1,629,577,475,898,001,200
49.446809
355
0.568536
false
a-nai/django-wiki
wiki/decorators.py
1
8825
# -*- coding: utf-8 -*- from __future__ import unicode_literals from __future__ import absolute_import from django.core.urlresolvers import reverse from django.http import HttpResponse, HttpResponseNotFound, \ HttpResponseForbidden, HttpResponseRedirect from django.shortcuts import redirect, get_object_or_404, get_list_or_404 from django.template.context import RequestContext from django.template.loader import render_to_string import datetime from myproject.models import WikiArticleread as articleread1 try: import json except ImportError: from django.utils import simplejson as json from wiki.core.exceptions import NoRootURL from wiki.conf import settings from django.utils.http import urlquote from six.moves import filter def json_view(func): def wrap(request, *args, **kwargs): obj = func(request, *args, **kwargs) if isinstance(obj, HttpResponse): # Special behaviour: If it's a redirect, for instance # because of login protection etc. just return # the redirect if obj.status_code == 301 or obj.status_code == 302: return obj data = json.dumps(obj, ensure_ascii=False) status = kwargs.get('status', 200) response = HttpResponse(content_type='application/json', status=status) response.write(data) return response return wrap def response_forbidden(request, article, urlpath): if request.user.is_anonymous(): qs = request.META.get('QUERY_STRING', '') if qs: qs = urlquote('?' + qs) else: qs = '' return redirect(settings.LOGIN_URL + "?next=" + request.path + qs) else: c = RequestContext(request, {'article': article, 'urlpath': urlpath}) return HttpResponseForbidden( render_to_string( "wiki/permission_denied.html", context_instance=c)) def get_article(func=None, can_read=True, can_write=False, deleted_contents=False, not_locked=False, can_delete=False, can_moderate=False, can_create=False): """View decorator for processing standard url keyword args: Intercepts the keyword args path or article_id and looks up an article, calling the decorated func with this ID. Will accept a func(request, article, *args, **kwargs) NB! This function will redirect if an article does not exist, permissions are missing or the article is deleted. Arguments: can_read=True and/or can_write=True: Check that the current request.user has correct permissions. can_delete and can_moderate: Verifies with wiki.core.permissions can_create: Same as can_write but adds an extra global setting for anonymous access (ANONYMOUS_CREATE) deleted_contents=True: Do not redirect if the article has been deleted. not_locked=True: Return permission denied if the article is locked Also see: wiki.views.mixins.ArticleMixin """ def wrapper(request, *args, **kwargs): from . 
import models path = kwargs.pop('path', None) article_id = kwargs.pop('article_id', None) urlpath = None # fetch by urlpath.path if not path is None: try: urlpath = models.URLPath.get_by_path(path, select_related=True) except NoRootURL: return redirect('wiki:root_create') except models.URLPath.DoesNotExist: try: pathlist = list( filter( lambda x: x != "", path.split("/"), )) path = "/".join(pathlist[:-1]) parent = models.URLPath.get_by_path(path) return HttpResponseRedirect( reverse( "wiki:create", kwargs={'path': parent.path, }) + "?slug=%s" % pathlist[-1]) except models.URLPath.DoesNotExist: c = RequestContext( request, { 'error_type': 'ancestors_missing'}) return HttpResponseNotFound( render_to_string( "wiki/error.html", context_instance=c)) if urlpath.article: # urlpath is already smart about prefetching items on article # (like current_revision), so we don't have to article = urlpath.article articlesread = models.Articleread.objects if (request.user.id==None): iduser='3'; else: iduser=request.user.id; try: articleread1.objects.filter(user_id=iduser, article_id=article.id)[0] except: #import pdb; pdb.set_trace() #,current_revision=article.current_revision.id user=articleread1.objects.create(percent='0',read=True,user_id=iduser,article_id=article.id, paid=False,readed=datetime.datetime.now(),last=datetime.datetime.now()) user.save() import types user=articleread1.objects.filter(user_id=iduser, article_id=article.id) #import pdb; pdb.set_trace() if len(user)==2: articleread1.objects.latest('id').delete() #user=articleread1.objects.latest('id'); #user.paid='0';user.save() #import pdb; pdb.set_trace() if (request.user.id!=None): articleread = get_object_or_404(articlesread, article_id=article.id,user_id=request.user.id) else: articleread = get_object_or_404(articlesread, article_id=article.id,user_id="3") else: # Be robust: Somehow article is gone but urlpath exists... # clean up return_url = reverse( 'wiki:get', kwargs={ 'path': urlpath.parent.path}) urlpath.delete() return HttpResponseRedirect(return_url) # fetch by article.id elif article_id: # TODO We should try to grab the article form URLPath so the # caching is good, and fall back to grabbing it from # Article.objects if not articles = models.Article.objects articlesread = models.Articleread.objects article = get_object_or_404(articles, id=article_id) articleread = get_object_or_404(articlesread, article_id=article.id) try: urlpath = models.URLPath.objects.get(articles__article=article) except models.URLPath.DoesNotExist as noarticle: models.URLPath.MultipleObjectsReturned = noarticle urlpath = None else: raise TypeError('You should specify either article_id or path') if not deleted_contents: # If the article has been deleted, show a special page. 
if urlpath: if urlpath.is_deleted(): # This also checks all ancestors return redirect('wiki:deleted', path=urlpath.path) else: if article.current_revision and article.current_revision.deleted: return redirect('wiki:deleted', article_id=article.id) if article.current_revision.locked and not_locked: return response_forbidden(request, article, urlpath) if can_read and not article.can_read(request.user): return response_forbidden(request, article, urlpath) if (can_write or can_create) and not article.can_write(request.user): return response_forbidden(request, article, urlpath) if can_create and not ( request.user.is_authenticated() or settings.ANONYMOUS_CREATE): return response_forbidden(request, article, urlpath) if can_delete and not article.can_delete(request.user): return response_forbidden(request, article, urlpath) if can_moderate and not article.can_moderate(request.user): return response_forbidden(request, article, urlpath) kwargs['urlpath'] = urlpath return func(request, article, articleread, *args, **kwargs) if func: return wrapper else: return lambda func: get_article( func, can_read=can_read, can_write=can_write, deleted_contents=deleted_contents, not_locked=not_locked, can_delete=can_delete, can_moderate=can_moderate, can_create=can_create)
gpl-3.0
7,039,904,762,252,002,000
39.668203
185
0.586742
false
dstufft/cryptography
tests/hazmat/primitives/test_hkdf.py
1
6173
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import, division, print_function

import binascii

import pytest
import six

from cryptography.exceptions import (
    AlreadyFinalized, InvalidKey, _Reasons
)
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.hkdf import HKDF, HKDFExpand

from ...utils import raises_unsupported_algorithm


@pytest.mark.hmac
class TestHKDF(object):
    def test_length_limit(self, backend):
        big_length = 255 * (hashes.SHA256().digest_size // 8) + 1

        with pytest.raises(ValueError):
            HKDF(
                hashes.SHA256(),
                big_length,
                salt=None,
                info=None,
                backend=backend
            )

    def test_already_finalized(self, backend):
        hkdf = HKDF(
            hashes.SHA256(),
            16,
            salt=None,
            info=None,
            backend=backend
        )

        hkdf.derive(b"\x01" * 16)

        with pytest.raises(AlreadyFinalized):
            hkdf.derive(b"\x02" * 16)

        hkdf = HKDF(
            hashes.SHA256(),
            16,
            salt=None,
            info=None,
            backend=backend
        )

        hkdf.verify(b"\x01" * 16, b"gJ\xfb{\xb1Oi\xc5sMC\xb7\xe4@\xf7u")

        with pytest.raises(AlreadyFinalized):
            hkdf.verify(b"\x02" * 16, b"gJ\xfb{\xb1Oi\xc5sMC\xb7\xe4@\xf7u")

        hkdf = HKDF(
            hashes.SHA256(),
            16,
            salt=None,
            info=None,
            backend=backend
        )

    def test_verify(self, backend):
        hkdf = HKDF(
            hashes.SHA256(),
            16,
            salt=None,
            info=None,
            backend=backend
        )

        hkdf.verify(b"\x01" * 16, b"gJ\xfb{\xb1Oi\xc5sMC\xb7\xe4@\xf7u")

    def test_verify_invalid(self, backend):
        hkdf = HKDF(
            hashes.SHA256(),
            16,
            salt=None,
            info=None,
            backend=backend
        )

        with pytest.raises(InvalidKey):
            hkdf.verify(b"\x02" * 16, b"gJ\xfb{\xb1Oi\xc5sMC\xb7\xe4@\xf7u")

    def test_unicode_typeerror(self, backend):
        with pytest.raises(TypeError):
            HKDF(
                hashes.SHA256(),
                16,
                salt=six.u("foo"),
                info=None,
                backend=backend
            )

        with pytest.raises(TypeError):
            HKDF(
                hashes.SHA256(),
                16,
                salt=None,
                info=six.u("foo"),
                backend=backend
            )

        with pytest.raises(TypeError):
            hkdf = HKDF(
                hashes.SHA256(),
                16,
                salt=None,
                info=None,
                backend=backend
            )

            hkdf.derive(six.u("foo"))

        with pytest.raises(TypeError):
            hkdf = HKDF(
                hashes.SHA256(),
                16,
                salt=None,
                info=None,
                backend=backend
            )

            hkdf.verify(six.u("foo"), b"bar")

        with pytest.raises(TypeError):
            hkdf = HKDF(
                hashes.SHA256(),
                16,
                salt=None,
                info=None,
                backend=backend
            )

            hkdf.verify(b"foo", six.u("bar"))


@pytest.mark.hmac
class TestHKDFExpand(object):
    def test_derive(self, backend):
        prk = binascii.unhexlify(
            b"077709362c2e32df0ddc3f0dc47bba6390b6c73bb50f9c3122ec844ad7c2b3e5"
        )

        okm = (b"3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c"
               b"5bf34007208d5b887185865")

        info = binascii.unhexlify(b"f0f1f2f3f4f5f6f7f8f9")
        hkdf = HKDFExpand(hashes.SHA256(), 42, info, backend)

        assert binascii.hexlify(hkdf.derive(prk)) == okm

    def test_verify(self, backend):
        prk = binascii.unhexlify(
            b"077709362c2e32df0ddc3f0dc47bba6390b6c73bb50f9c3122ec844ad7c2b3e5"
        )

        okm = (b"3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c"
               b"5bf34007208d5b887185865")

        info = binascii.unhexlify(b"f0f1f2f3f4f5f6f7f8f9")
        hkdf = HKDFExpand(hashes.SHA256(), 42, info, backend)

        assert hkdf.verify(prk, binascii.unhexlify(okm)) is None

    def test_invalid_verify(self, backend):
        prk = binascii.unhexlify(
            b"077709362c2e32df0ddc3f0dc47bba6390b6c73bb50f9c3122ec844ad7c2b3e5"
        )

        info = binascii.unhexlify(b"f0f1f2f3f4f5f6f7f8f9")
        hkdf = HKDFExpand(hashes.SHA256(), 42, info, backend)

        with pytest.raises(InvalidKey):
            hkdf.verify(prk, b"wrong key")

    def test_already_finalized(self, backend):
        info = binascii.unhexlify(b"f0f1f2f3f4f5f6f7f8f9")

        hkdf = HKDFExpand(hashes.SHA256(), 42, info, backend)

        hkdf.derive(b"first")

        with pytest.raises(AlreadyFinalized):
            hkdf.derive(b"second")

    def test_unicode_error(self, backend):
        info = binascii.unhexlify(b"f0f1f2f3f4f5f6f7f8f9")
        hkdf = HKDFExpand(hashes.SHA256(), 42, info, backend)

        with pytest.raises(TypeError):
            hkdf.derive(six.u("first"))


def test_invalid_backend():
    pretend_backend = object()

    with raises_unsupported_algorithm(_Reasons.BACKEND_MISSING_INTERFACE):
        HKDF(hashes.SHA256(), 16, None, None, pretend_backend)

    with raises_unsupported_algorithm(_Reasons.BACKEND_MISSING_INTERFACE):
        HKDFExpand(hashes.SHA256(), 16, None, pretend_backend)
apache-2.0
2,612,955,914,697,860,600
27.187215
79
0.568767
false
appanacca/tutmom
check_env.py
1
3306
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2015-2016 California Institute of Technology.
# Copyright (c) 2016-2018 Mike McKerns.
# License: 3-clause BSD.
"""
check environment scipt
"""
import sys

# requirements
has = dict(
    # optimization
    scipy='0.6.0',
    mystic='0.3.1',
    # parallel computing
    pathos='0.2.1',
    # dependencies
    pox='0.2.3',
    dill='0.2.7',
    klepto='0.1.4',
    numpy='1.0',
    sympy='0.6.7',
    ppft='1.6.4.7',
    multiprocess='0.70.5',
    # examples
    matplotlib='0.91',
    jupyter='1.0',
    cvxopt='1.1.0',
    # optional
    #pyina='0.2.0.dev0',
    #pulp='1.6.0',
    #Numberjack='1.1.0',
    #python-constraints='1.2',  # installs as 'constraints'
    sqlalchemy='0.8.4',
)

# executables
# list: At least one item is expected
# tuple: All items are expected
run = dict(
    # optimization
    mystic=('mystic_log_reader.py','mystic_model_plotter.py',
            'support_convergence.py','support_hypercube.py',
            'support_hypercube_measures.py','support_hypercube_scenario.py',),
    # parallel computing
    pathos=('pathos_tunnel.py','pathos_server.py','tunneled_pathos_server.py',),
    # dependencies
    ppft=('ppserver.py',),
    # examples
    ### jupyter-notebook
    # optional
    #pyina=('sync','cp','rm','ezpool.py','ezscatter.py',),
)

returns = 0

# check installed packages
for module in has.keys():
    try:
        _module = module.split('-')[-1]
        __module__ = __import__(_module, globals(), locals(), [], 0)
        exec('%s = __module__' % _module)
    except ImportError:
        print("%s:: %s" % (module, sys.exc_info()[1]))
        run.pop(module, None)
        returns += 1

# check required versions
from distutils.version import LooseVersion as V
for module,version in has.items():
    try:
        _module = module.split('-')[-1]
        assert V(eval(_module).__version__) >= V(version)
    except NameError: pass # failed import
    except AttributeError: pass # can't version-check non-standard packages...
    except AssertionError:
        print("%s:: Version >= %s is required" % (module, version))
        returns += 1


def executable_exist(module, prog):
    try:
        assert which(prog)
        # process = Popen([prog, '--help'], stderr=STDOUT, stdout=PIPE)
        # process.wait()
        return True
    except (OSError, AssertionError):
        from sys import exc_info
        print("%s:: Executable '%s' not found" % (module, prog))
        #print("%s:: %s" % (prog, exc_info()[1]))
        return False


# check required executables
try:
    from pox import which
    #from subprocess import Popen, STDOUT, PIPE#, call
except ImportError:
    sys.exit(returns)

for module,executables in run.items():
    if isinstance(executables, list):
        found = False
        for executable in executables:
            if executable_exist(module, executable):
                found = True
                break
        if not found:
            returns += 1
    else:
        for executable in executables:
            if not executable_exist(module, executable):
                returns += 1

# final report
if not returns:
    print('-'*50)
    print('OK. All required items installed.')

sys.exit(returns)
bsd-3-clause
4,444,178,281,737,537,500
24.828125
80
0.594676
false
sodabiscuit/jsonschema-rest-sphinx
jsonschema2rst.py
1
7626
#!/usr/bin/python # -*- coding: utf-8 -*- import os import json import codecs import shutil import re class JSONSchema2RST: def __init__(self): self.data = {'type': 'root', 'children': {}} self.source = None self.target = None self.toctrees = [] def travel(self, source='./schemas', target='./source'): self.source = source self.index_target = target self.target = os.path.join(target, 'apis') self.blob_template = os.path.join(self.source, '_template/blob.rst') self.toc_template = os.path.join(self.source, '_template/toc.rst') self.index_template = os.path.join(self.source, '_template/index.rst') for root, subdirs, files in os.walk(source): for f in files: if os.path.basename(f) == 'config.json': config_path = os.path.join(root, os.path.basename(f)) config_data = self.parse_config(os.path.abspath(config_path)) config_type = config_data.get('type', 'toc') uri = self.get_uri(root) config_data.update({ 'uri': uri }) if config_type == 'toc': config_data.update({ 'children': {} }) elif config_type == 'blob': request_blob_path = os.path.join(root, 'request.json') response_blob_path = os.path.join(root, 'response.json') config_data.update({ 'request': self.parse_config(request_blob_path), 'response': self.parse_config(response_blob_path) }) self.update_uri_config(root, config_data) self.export_doc(self.data) toctrees = '' index_target_path = os.path.join(self.index_target,'index.rst') index_rst = self.parse_raw(self.index_template) for toc in self.toctrees: toctrees += ' ' toctrees += os.path.join('.', 'apis', toc, 'doc') toctrees += '\n' index_rst = re.sub('\{\{toctrees\}\}', toctrees, index_rst) index_file = codecs.open(index_target_path, encoding='utf-8', mode='w') index_file.write(index_rst) index_file.close() def get_uri(self, source): return os.path.relpath(os.path.abspath(source), os.path.abspath(self.source)) def update_uri_config(self, source, data): keys_str = self.get_uri(source) keys_list = keys_str.split('/') d = self.data for key in keys_list: if 'children' in d and key not in d['children']: d['children'].update({key: data}) d = d['children'][key] def export_doc(self, data): # print json.dumps(self.data, sort_keys=False, indent=4, separators=(',', ': ')) for k, v in data['children'].iteritems(): doc_type = v.get('type', 'toc') uri = v.get('uri') target_path = os.path.join(self.target, uri) if not os.path.isdir(target_path) or not os.path.exists(target_path): shutil.rmtree(target_path, ignore_errors=True) os.makedirs(target_path) target_path = os.path.join(self.target, uri, 'doc.rst') rst_raw = self.parse_blob(v) if doc_type == 'blob' else self.parse_toc(v) doc = codecs.open(target_path, encoding='utf-8', mode='w') doc.write(rst_raw) doc.close() if doc_type == 'toc': if 'children' in v: self.export_doc(v) elif doc_type == 'blob': if uri not in self.toctrees: self.toctrees.append(uri) example_source_path = os.path.join(self.source, uri, 'example.json') example_target_path = os.path.join(self.target, uri, 'example.json') if os.path.exists(example_source_path) and os.path.isfile(example_source_path): shutil.copy2(example_source_path, example_target_path) else: eg = codecs.open(example_target_path, encoding='utf-8', mode='w') eg.write('empty') eg.close() @staticmethod def parse_config(source): if os.path.exists(source): x = codecs.open(source, encoding='utf-8') data = json.loads(x.read()) x.close() return data else: return {} @staticmethod def parse_raw(source): if os.path.exists(source): x = codecs.open(source, encoding='utf-8') data = x.read() x.close() return data else: return 'null' def 
parse_toc(self, toc): rst = self.parse_raw(self.toc_template) rst = re.sub('\{\{title\}\}', toc.get('title'), rst) return rst def parse_blob(self, blob): rst = self.parse_raw(self.blob_template) rst = re.sub('\{\{title\}\}', blob.get('title'), rst) rst = re.sub('\{\{uri\}\}', blob.get('uri'), rst) rst = re.sub('\{\{authors\}\}', blob.get('authors'), rst) rst = re.sub('\{\{version\}\}', blob.get('version'), rst) rst = re.sub('\{\{method\}\}', blob.get('method'), rst) request_raw = '' if 'request' in blob: if not not blob.get('request'): request_raw = self.parse_schema(blob.get('request')) rst = re.sub('\{\{request\}\}', request_raw, rst) response_raw = '' if 'response' in blob: if not not blob.get('response'): response_raw = self.parse_schema(blob.get('response').get('properties').get('data')) rst = re.sub('\{\{response\}\}', response_raw, rst) return rst @staticmethod def parse_schema(schema): rst = u''' <table border="1" class="docutils"> <thead valign="bottom"> <tr> <th class="head">键名</th> <th class="head">类型</th> <th class="head">描述</th> <tr> </thead> <tbody valign="top"> ''' items = [] if 'properties' in schema: items = schema['properties'] elif 'items' in schema: items = schema['items']["properties"] if len(items) > 0: i = 0 for k,v in items.iteritems(): schema_type = v.get('type', 'string') rst += '<tr class="row-odd">' if i/2 == 0 else '<tr class="row-even">' rst += '<td>' + k + '</td><td>' + schema_type + '</td>' if schema_type == 'object' or schema_type == 'array': rst += '<td><strong style="display:block;margin-bottom:10px;">' + v.get('description', '-') + '</strong>' rst += JSONSchema2RST.parse_schema(v) rst += '</td>' else: rst += '<td>' + v.get('description', '-') + '</td>' rst += '</tr>' i += 1 else: rst += '<tr class="row-odd">' rst += u'<td colspan="3" style="text-align:center;">未配置</td>' rst += '</tr>' rst += ''' </tbody> </table> ''' return rst if __name__ == "__main__": jsonschema2rst = JSONSchema2RST() jsonschema2rst.travel()
mit
3,554,514,621,519,852,000
33.581818
125
0.488565
false
billowen/pygds
pygds/canvas.py
1
3240
from PySide.QtGui import *

from polygon import Polygon
from path import Path
from aref import ARef
from sref import SRef
from graphicsitems import *
import os


class MyRect(QGraphicsRectItem):
    def __init__(self, x, y, w, h, scene=None):
        super().__init__(x, y, w, h, scene)

    def paint(self, painter, option, widget):
        pen = QPen(QColor(0, 0, 0))
        if option.state & QStyle.State_Selected:
            pen.setStyle(Qt.DotLine)
        painter.setPen(pen)
        brush = QBrush(QColor(12, 34, 53))
        brush.setStyle(Qt.HorPattern)
        painter.setBrush(brush)
        painter.drawRect(self.rect())


class Canvas(QGraphicsView):
    def __init__(self, cell, parent=None):
        super().__init__(parent)
        self.cell = cell
        self.setCacheMode(QGraphicsView.CacheBackground)
        scene = QGraphicsScene()
        self.setScene(scene)
        self.scale(1, -1)
        # self.scene().addItem(MyRect(10,10,10,10))
        for element in self.cell:
            # if isinstance(element, SRef):
            #     self.scene().addItem(SRefViewItem(element))
            if isinstance(element, Polygon):
                self.scene().addItem(PolygonViewItem(element))
            elif isinstance(element, Path):
                self.scene().addItem(PathViewItem(element))
            elif isinstance(element, SRef):
                self.scene().addItem(SRefViewItem(element))
            elif isinstance(element, ARef):
                self.scene().addItem(ARefViewItem(element))
        self.fitInView(self.scene().sceneRect(), Qt.KeepAspectRatio)

    #def resizeEvent(self, *args, **kwargs):
    #    self.fitInView(self.scene().sceneRect(), Qt.KeepAspectRatio)

    def wheelEvent(self, evt):
        zoom_in_factor = 1.25
        zoom_out_factor = 1 / zoom_in_factor
        self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
        self.setResizeAnchor(QGraphicsView.AnchorUnderMouse)
        old_pos = self.mapToScene(evt.pos())
        if evt.delta() > 0:
            zoom_fact = zoom_in_factor
        else:
            zoom_fact = zoom_out_factor
        self.scale(zoom_fact, zoom_fact)
        new_pos = self.mapToScene(evt.pos())
        move = new_pos - old_pos
        self.translate(move.x(), move.y())


if __name__ == '__main__':
    file_name = "transistor_hisilicon_28_nm_RF_20150322.xsmc.db"
    print(os.getcwd())
    gds = GDS()
    with open(file_name, 'rb') as stream:
        gds.read(stream)
    gds.build_cell_links()

    app = QApplication(sys.argv)
    canvas = Canvas(gds["CELL_007"])
    mainwin = QMainWindow()
    mainwin.setCentralWidget(canvas)
    mainwin.show()
    sys.exit(app.exec_())

    # if os.path.exists(file_name) is True:
    # try:
    #
    # except FileNotFoundError:
    #     print("File not found")
    # except exceptions.EndOfFileError:
    #     print("The file is not completed.")
    # except exceptions.IncorrectDataSize as e:
    #     print(e.args[0])
    # except exceptions.UnsupportedTagType as e:
    #     print("Unsupported tag type ", e.args[0])
    # except exceptions.FormatError as e:
    #     print(e.args[0], e.args[1])
    # finally:
    #     stream.close()
lgpl-3.0
-2,588,883,861,483,748,400
33.084211
69
0.598826
false
jbarriosc/ACSUFRO
LGPL/CommonSoftware/acspycommon/src/Acspy/Common/Log.py
1
30655
# @(#) $Id: Log.py,v 1.3 2012/04/23 22:46:03 javarias Exp $ # # ALMA - Atacama Large Millimiter Array # (c) Associated Universities, Inc. Washington DC, USA, 2001 # (c) European Southern Observatory, 2002 # Copyright by ESO (in the framework of the ALMA collaboration) # and Cosylab 2002, All rights reserved # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ''' An interface to logging services, including the ACS Log Server. This module is designed to be used in all Python code, and to be failsafe, so that users can rely on getting logging output in (nearly) all circumstances.Design choices derived from the design goals: 1) in the absence of the ACS Log Server to provide logging to a file similar to the standard ACS logging functionality, 2) in case of failure to log to a file, to log to stderr. Failure to log to stderr will cause an exception; I think this effect is desirable, but may change if I\'m persuaded otherwise. Logging also respects the "ACS_LOG_STDOUT" and "ACS_LOG_CENTRAL" environment variables. Last but not least, developers should use the getLogger() function instead of creating new instances of the Logger class which can take a very long time depending on managers load. ''' __revision__ = "$Id: Log.py,v 1.3 2012/04/23 22:46:03 javarias Exp $" #--REGULAR IMPORTS------------------------------------------------------------- from os import environ from inspect import stack import sys import math import logging from logging.handlers import MemoryHandler from traceback import print_exc from socket import gethostname import time import os from traceback import extract_stack from atexit import register import sched import threading import abc #--ACS Imports----------------------------------------------------------------- import Logging from Acspy.Common.ACSHandler import ACSHandler from Acspy.Common.ACSHandler import ACSFormatter from Acspy.Common.ACSHandler import ACSLogRecord from Acspy.Common.ACSHandler import makeACSLogRecord from Acspy.Common.TimeHelper import TimeUtil #--CORBA STUBS----------------------------------------------------------------- import ACSLog #--GLOBALS--------------------------------------------------------------------- # # _srcfile is used when walking the stack to check when we've got the first # caller stack frame. 
# if __file__[-4:].lower() in ['.pyc', '.pyo']: _srcfile = __file__[:-4] + '.py' else: _srcfile = __file__ _srcfile = os.path.normcase(_srcfile) #------------------------------------------------------------------------------ logging.TRACE = logging.NOTSET + 1 logging.addLevelName(logging.TRACE, "TRACE") logging.DELOUSE = logging.TRACE + 1 logging.addLevelName(logging.DELOUSE, "DELOUSE") logging.NOTICE = logging.INFO + 1 logging.addLevelName(logging.NOTICE, "NOTICE") logging.ALERT = logging.CRITICAL + 1 logging.addLevelName(logging.ALERT, "ALERT") logging.EMERGENCY = logging.ALERT + 1 logging.addLevelName(logging.EMERGENCY, "EMERGENCY") logging.OFF = logging.EMERGENCY + 1 logging.addLevelName(logging.OFF, "OFF") # Since the Python handlers only use the Python constants # we need to reverse map them back to the integer range. # The interpolated values, 1 and 7, are reported as their # effective log levels. RLEVELS = { logging.NOTSET : 0, logging.TRACE : 1, logging.DELOUSE : 2, logging.DEBUG : 3, logging.INFO : 4, logging.NOTICE : 5, logging.WARNING : 6, logging.ERROR : 8, logging.CRITICAL : 9, logging.ALERT : 10, logging.EMERGENCY : 11, logging.OFF : 99 } # Log Levels are received as integer in the range [0,11] # with 7 undefined. The current code interpolates # 1 and 7 to the next highest level, so that behaviour # has been incorporated in the lookup table. LEVELS = { 0 : logging.NOTSET, ACSLog.ACS_LOG_TRACE : logging.TRACE, 1 : logging.TRACE, ACSLog.ACS_LOG_DELOUSE : logging.DELOUSE, 2 : logging.DELOUSE, ACSLog.ACS_LOG_DEBUG : logging.DEBUG, 3 : logging.DEBUG, ACSLog.ACS_LOG_INFO : logging.INFO, 4 : logging.INFO, ACSLog.ACS_LOG_NOTICE : logging.NOTICE, 5 : logging.NOTICE, ACSLog.ACS_LOG_WARNING : logging.WARNING, 6 : logging.WARNING, ACSLog.ACS_LOG_ERROR : logging.ERROR, 7 : logging.ERROR, 8 : logging.ERROR, ACSLog.ACS_LOG_CRITICAL : logging.CRITICAL, 9 : logging.CRITICAL, ACSLog.ACS_LOG_ALERT : logging.ALERT, 10 : logging.ALERT, ACSLog.ACS_LOG_EMERGENCY : logging.EMERGENCY, 11 : logging.EMERGENCY, # ACSLog.ACS_LOG_OFF does not exist 99 : logging.OFF } #------------------------------------------------------------------------------ def getLevelName(lnum): return logging.getLevelName(LEVELS[lnum]) #------------------------------------------------------------------------------ #determine ACS_LOG_STDOUT if environ.has_key('ACS_LOG_STDOUT'): ACS_LOG_STDOUT = int(environ['ACS_LOG_STDOUT']) else: ACS_LOG_STDOUT = 3 #determine ACS_LOG_CENTRAL if environ.has_key('ACS_LOG_CENTRAL'): ACS_LOG_CENTRAL = int(environ['ACS_LOG_CENTRAL']) else: ACS_LOG_CENTRAL = 3 def stdoutOk(log_priority): ''' Helper method returns true if log_priority is greater than $ACS_LOG_STDOUT. ''' return (ACS_LOG_STDOUT <= RLEVELS[log_priority]) def acsPrintExcDebug(): ''' Basically identical to traceback.print_exc() only one small exception - exception information is only printed to stdout of the ACS logging level is set to DEBUG or lower. ''' if stdoutOk(logging.INFO): print_exc() #------------------------------------------------------------------------ def setCapacity(capacity): ''' Set the maximum capacity for the central log queue. Parameters: - capacity is the new maximum number of pending records Returns: Nothing Raises: NameError if no Logger object has been previously instantiated ''' if capacity > 0: CENTRALHANDLER.capacity = capacity else: CENTRALHANDLER.capacity = 0 #------------------------------------------------------------------------ def setBatchSize(batchsize): ''' Set the batch size for the central log queue. 
Batch size cannot exceed the capacity. Parameters: - batchsize is the new number of records to be sent as a group. Returns: Nothing Raises: NameError if no Logger object has been previously instantiated ''' if batchsize > CENTRALHANDLER.capacity: CENTRALHANDLER.batchsize = CENTRALHANDLER.capacity elif batchsize >= 0: CENTRALHANDLER.batchsize = batchsize else: CENTRALHANDLER.batchsize = 0 #------------------------------------------------------------------------ def setImmediateDispatchLevel(level): ''' Set the immediate dispatch threshold for the central log queue. Parameters: - level is the new level that triggers immediate flushing of the queue. Returns: Nothing Raises: KeyError if level is not defined NameError if no Logger object has been previously instantiated ''' CENTRALHANDLER.dispatchlevel = LEVELS[level] #------------------------------------------------------------------------ def setDefaultLevels(levels): ''' Set the default log level filtering for this process. Parameters: - level is the LogLevels object containing the new values Returns: Nothing Raises: NameError if no Logger object has been previously instantiated ''' DEFAULTLOCALHANDLER.setLevel(LEVELS[levels.minLogLevelLocal]) DEFAULTCENTRALHANDLER.setLevel(LEVELS[levels.minLogLevel]) #------------------------------------------------------------------------ def getDefaultLevels(): ''' Retrive the current default log levels Parameters: None Returns: LogLevels object containing the current default log levels. Raises: NameError if no Logger object has been previously instantiated ''' return Logging.LoggingConfigurable.LogLevels(True, RLEVELS[DEFAULTCENTRALHANDLER.level], RLEVELS[DEFAULTLOCALHANDLER.level]) #------------------------------------------------------------------------ # The ACS logging system attempts to reduce the amount of network traffic # by batching log messages. However, batching can cause long delays in # propagating the information, especially if the process doesn't log many # messages or if the log level is set at a high level. # # To address this problem, the Python logger implements a periodic flush # thread that may be used to clear the buffered messages. It is optional. # # Flush every 10 seconds is the default operation. DEFAULT_FLUSH_PERIOD = 10 # Initialize the singleton. # # FLUSHTHREAD is the thread the handles the flush processing # SCHEDULER is the manager of the event queue # NEXTEVENT is a tuple containing the information for the next scheduled event # INTERVAL is the time between events try: FLUSHTHREAD except: FLUSHTHREAD = None SCHEDULER = None NEXTEVENT = None INTERVAL = None #------------------------------------------------------------------------ def flush(): ''' Flush the messages from the buffer and schedule the next event. Returns: Nothing Raises: Nothing ''' global NEXTEVENT NEXTEVENT = SCHEDULER.enter(INTERVAL,1,flush,()) CENTRALHANDLER.flush() #------------------------------------------------------------------------ def delay(remaining): ''' Pause before checking if the next event should be processed. Parameter: - remaining is the number of seconds to wait before the next event. ''' # We can't sleep the entire interval period. If we did, we could # never change the interval. As a compromise, we check the event # queue every second. time.sleep(1) #------------------------------------------------------------------------ def startPeriodicFlush(interval=DEFAULT_FLUSH_PERIOD): ''' Configure and start the periodic flush thread. 
Parameter: - interval is the number of seconds between flushes Returns: Nothing Raises: Nothing ''' global FLUSHTHREAD global SCHEDULER global NEXTEVENT global INTERVAL # Only one flush thread is allowed per process if FLUSHTHREAD is None: INTERVAL = interval # Only one event queue per process if SCHEDULER is None: SCHEDULER = sched.scheduler(time.time,delay) NEXTEVENT = SCHEDULER.enter(INTERVAL,1,flush,()) FLUSHTHREAD = threading.Thread(target=SCHEDULER.run) FLUSHTHREAD.daemon = True FLUSHTHREAD.start() # To ensure a clean interpreter shutdown register(stopPeriodicFlush) #------------------------------------------------------------------------ def stopPeriodicFlush(): ''' Stop the periodic flush thread. Returns: Nothing Raises: Nothing ''' try: SCHEDULER.cancel(NEXTEVENT) except: pass FLUSHTHREAD.join() #------------------------------------------------------------------------ def setFlushInterval(interval): ''' Change the period between flushes. Parameter: - interval is the number of seconds between flushes Return: Nothing Raise: Nothing ''' global NEXTEVENT global INTERVAL if interval <= 0: # We can't go back in time so we shutdown the thread instead. stopPeriodicFlush() else: # The interval change takes effect immediately so the pending # flush has to be rescheduled INTERVAL = interval newevent = SCHEDULER.enter(INTERVAL,1,flush,()) try: SCHEDULER.cancel(NEXTEVENT) except: pass NEXTEVENT = newevent #------------------------------------------------------------------------ def isFlushRunning(): ''' Is the flush thread running? Returns: the state of the flush thread or False if thread has not been created. Raises: Nothing ''' try: return FLUSHTHREAD.isAlive() except: return False #------------------------------------------------------------------------------ class LogThrottleAlarmerBase: ''' Abstract base class for the LogThrottle to raise/clear alarms ''' __metaclass__ = abc.ABCMeta @abc.abstractmethod def sendThrottleAlarm(self, active): ''' Send/Clear the alarm for the LogThrottle Raise the alarm if active=True and clear otherwise ''' return #------------------------------------------------------------------------------ class Logger(logging.Logger): ''' Logger is of primary interest to developers. It is used to send log messages to the ACS Logging System. Developers need not create an instance of this class though as the getLogger() function returns a singled logger. ''' #------------------------------------------------------------------------ def __init__(self, name): ''' Create a Logger instance. Parameters: name of this logger Returns: Nothing Raises: Nothing ''' global LOCALHANDLER, CENTRALHANDLER, DEFAULTLOCALHANDLER, DEFAULTCENTRALHANDLER #pass it on to baseclass. by default all logs are sent to the handlers logging.Logger.__init__(self, name, logging.NOTSET) # The ACS logger uses two handlers one for local messages and one for # central messages. There should be only one pair of handlers per # process. # Create a singleton handler for the local messages. These messages # sent to stdout stream. try: LOCALHANDLER except NameError: LOCALHANDLER = logging.StreamHandler(sys.stdout) LOCALHANDLER.setFormatter(ACSFormatter()) # Create a singleton handler for messages destined for the central # logging service. try: CENTRALHANDLER except NameError: CENTRALHANDLER = ACSHandler() register(CENTRALHANDLER.flush) # The default filtering level for the local and central loggers # are held in separate handlers. 
By moving the management of the # logging levels to these handlers, we can allow users to set # log levels lower than the default value. # Singleton wrapper for the local message handler try: DEFAULTLOCALHANDLER except NameError: DEFAULTLOCALHANDLER = MemoryHandler(capacity=0, target=LOCALHANDLER) DEFAULTLOCALHANDLER.setLevel(LEVELS[ACS_LOG_STDOUT]) # Singleton wrapper for the central message handler try: DEFAULTCENTRALHANDLER except NameError: DEFAULTCENTRALHANDLER = MemoryHandler(capacity=0, target=CENTRALHANDLER) DEFAULTCENTRALHANDLER.setLevel(LEVELS[ACS_LOG_CENTRAL]) #create a stdout handler self.stdouthandler = DEFAULTLOCALHANDLER #create an ACS log svc handler self.acshandler = DEFAULTCENTRALHANDLER #flag to indicate if this logger is using default values self.usingDefault = True #Nested loggers should not repeat messages self.propagate = 0 #add handlers self.addHandler(self.stdouthandler) self.addHandler(self.acshandler) #------------------------------------------------------------------------ def __getCallerName(self): ''' Helper function returns the name of the calling function or method. ''' theStack=stack() try: functions=theStack[3] func_name = functions[3] except IndexError, ex: func_name = "Indeterminable Name" ffunc_name = func_name.replace('<module>', 'Main') return ffunc_name #------------------------------------------------------------------------ def __formatMessage(self, msg): ''' Helper function formats the message. ''' return "%s - %s" %(self.__getCallerName(), msg) #------------------------------------------------------------------------ def logAtLevel(self, lvl, msg): ''' Log a message at the given level. Parameters: - lvl is the log level to send the message at - msg is a string to be sent to the logging system Returns: Nothing Raises: ValueError if lvl is NOTSET or OFF ''' msg = self.__formatMessage(msg) if lvl == 0 or lvl == 99: raise ValueError("Cannot log messages at level %d" % lvl) self.log(LEVELS[lvl], msg) #------------------------------------------------------------------------ def logAlert(self, msg): ''' Log an alert message. Parameters: - msg is a string to be sent to the logging system Returns: Nothing Raises: Nothing ''' msg = self.__formatMessage(msg) self.log(LEVELS[ACSLog.ACS_LOG_ALERT], msg) #------------------------------------------------------------------------ def logCritical(self, msg): ''' Log a critical message. Parameters: - msg is a string to be sent to the logging system Returns: Nothing Raises: Nothing ''' msg = self.__formatMessage(msg) self.log(LEVELS[ACSLog.ACS_LOG_CRITICAL], msg) #------------------------------------------------------------------------ def logDebug(self, msg): ''' Log a debug message. Parameters: - msg is a string to be sent to the logging system Returns: Nothing Raises: Nothing ''' msg = self.__formatMessage(msg) self.log(LEVELS[ACSLog.ACS_LOG_DEBUG], msg) #------------------------------------------------------------------------ def logDelouse(self, msg): ''' Log a delouse message. Parameters: - msg is a string to be sent to the logging system Returns: Nothing Raises: Nothing ''' msg = self.__formatMessage(msg) self.log(LEVELS[ACSLog.ACS_LOG_DELOUSE], msg) #------------------------------------------------------------------------ def logEmergency(self, msg): ''' Log an emergency message. 
Parameters: - msg is a string to be sent to the logging system Returns: Nothing Raises: Nothing ''' msg = self.__formatMessage(msg) self.log(LEVELS[ACSLog.ACS_LOG_EMERGENCY], msg) #------------------------------------------------------------------------ def logError(self, msg): ''' Log an error message. Parameters: - msg is a string to be sent to the logging system Returns: Nothing Raises: Nothing ''' msg = self.__formatMessage(msg) self.log(LEVELS[ACSLog.ACS_LOG_ERROR], msg) #------------------------------------------------------------------------ def logInfo(self, msg): ''' Log an informational message. Parameters: - msg is a string to be sent to the logging system Returns: Nothing Raises: Nothing ''' msg = self.__formatMessage(msg) self.log(LEVELS[ACSLog.ACS_LOG_INFO], msg) #------------------------------------------------------------------------ def logNotice(self, msg): ''' Log a notice message. Parameters: - msg is a string to be sent to the logging system Returns: Nothing Raises: Nothing ''' msg = self.__formatMessage(msg) self.log(LEVELS[ACSLog.ACS_LOG_NOTICE], msg) #------------------------------------------------------------------------ def logTrace(self, msg): ''' Log a trace message. Parameters: - msg is a string to be sent to the logging system Returns: Nothing Raises: Nothing ''' msg = self.__formatMessage(msg) self.log(LEVELS[ACSLog.ACS_LOG_TRACE], msg) #------------------------------------------------------------------------ def logWarning(self, msg): ''' Log a warning message. Parameters: - msg is a string to be sent to the logging system Returns: Nothing Raises: Nothing ''' msg = self.__formatMessage(msg) self.log(LEVELS[ACSLog.ACS_LOG_WARNING], msg) #------------------------------------------------------------------------ def logXML(self, xml): ''' Log an XML string. Parameter: xml - XML string (really just any string, but the log server may not like anything non-XMLish -- I have not tried this) Returns: Nothing Raises: Nothing ''' self.log(LEVELS[ACSLog.ACS_LOG_DEBUG], xml) #------------------------------------------------------------------------ def logErrorTrace(self, errortrace, priority = ACSLog.ACS_LOG_ERROR): ''' Log an error stack trace. Parameter: - errortrace (top of error stack) - priorty value of logging priorty Returns: Nothing Raises: KeyError if priority is not in the ACSLog.Priorities ''' #ok to send it directly if not priority in ACSLog.Priorities._items: raise KeyError("Invalid Log Level") self.log(LEVELS[priority], 'Error Trace', extra={ 'errortrace' : errortrace, 'priority' : priority}) #------------------------------------------------------------------------ def logTypeSafe(self, priority, timestamp, msg, rtCont, srcInfo, data, audience=None, array=None, antenna=None): ''' Log a type safe log. 
Parameter: - priority value of logging priority - timestamp time of log creation - msg log definition shortDescription - rtCont run-time context information - srcInfo src information - data name/value pairs Returns: Nothing Raises: KeyError if priority is not in the ACSLog.Priorities ''' if not priority in ACSLog.Priorities._items: raise KeyError("Invalid Log Level") if audience is None: audience = "" if array is None: array = "" if antenna is None: antenna = "" self.log(LEVELS[priority], msg, extra={ 'priority' : priority, 'rtCont' : rtCont, 'srcInfo' : srcInfo, 'data' : data, 'audience' : audience, 'array' : array, 'antenna' : antenna}) #------------------------------------------------------------------------ def logNotSoTypeSafe(self, priority, msg, audience=None, array=None, antenna=None): ''' Log a message indicating audience, array and/or antenna. Parameter: - priority value of logging priority - msg log definition shortDescription - audience - array - antenna Returns: Nothing Raises: KeyError if priority is not in the ACSLog.Priorities ''' if not priority in ACSLog.Priorities._items: raise KeyError("Invalid Log Level") if audience is None: audience = "" if array is None: array = "" if antenna is None: antenna = "" self.log(LEVELS[priority], msg, extra={ 'priority' : priority, 'audience' : audience, 'array' : array, 'antenna' : antenna}) #------------------------------------------------------------------------ def setLevels(self, loglevel): ''' Adjust the priority level filter for log messages. Parameter: - Logging.LoggingConfigurable.LogLevels object containing new level information Returns: Nothing Raises: Nothing ''' if loglevel.useDefault and not self.usingDefault: self.usingDefault = True self.removeHandler(self.stdouthandler) self.removeHandler(self.acshandler) self.addHandler(DEFAULTLOCALHANDLER) self.addHandler(DEFAULTCENTRALHANDLER) self.stdouthandler = DEFAULTLOCALHANDLER self.acshandler = DEFAULTCENTRALHANDLER elif not loglevel.useDefault: if self.usingDefault: self.usingDefault = False self.removeHandler(self.stdouthandler) self.removeHandler(self.acshandler) self.stdouthandler = MemoryHandler(capacity=0, target=LOCALHANDLER) self.acshandler = MemoryHandler(capacity=0, target=CENTRALHANDLER) self.addHandler(self.stdouthandler) self.addHandler(self.acshandler) self.stdouthandler.setLevel(LEVELS[loglevel.minLogLevelLocal]) self.acshandler.setLevel(LEVELS[loglevel.minLogLevel]) #------------------------------------------------------------------------ def getLevels(self): ''' Return the current priority level values for the stdout and central logs. Parameter: None Returns: Logging.LoggingConfigurable.LogLevels object containing the current level settings Raises: Nothing ''' return Logging.LoggingConfigurable.LogLevels(self.usingDefault, RLEVELS[self.acshandler.level], RLEVELS[self.stdouthandler.level]) #------------------------------------------------------------------------ def findCaller(self): """ Find the stack frame of the caller so that we can note the source file name, line number and function name. 
""" f = logging.currentframe().f_back rv = "(unknown file)", 0, "(unknown function)" while hasattr(f, "f_code"): co = f.f_code filename = os.path.normcase(co.co_filename) if filename == _srcfile: f = f.f_back continue rv = (filename, f.f_lineno, co.co_name) break return rv #------------------------------------------------------------------------ def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None): """ Build the ACSLogRecord for this information """ return makeACSLogRecord(name, level, fn, lno, msg, args, exc_info, func, extra) #------------------------------------------------------------------------ def configureLogging(self, maxLogsPerSec, alarmSender=None): ''' If alarmSender is not None, it must be a subclass of LogThrottleAlarmerBase See also ACSHandler.configureLogging ''' CENTRALHANDLER.configureLogging(maxLogsPerSec,alarmSender) #---------------------------------------------------------------------------- # The Python logging module contains code to manage a hierarchy of loggers. # The root logger has a default level setting of WARNING and would return # logging.Logger objects. These defaults were changed to reflect ACS # operations. logging.setLoggerClass(Logger) logging.root.setLevel(logging.NOTSET) #---------------------------------------------------------------------------- def getLogger(name=None): ''' This returns the singleton instance of logger. Used so we do not have to keep asking the slow manager for a reference to the logging service. Parameters: name of the logger Return: A logger Raises: ??? ''' return logging.getLogger(str(name)) #---------------------------------------------------------------------------- def getLoggerNames(startfilter=None): ''' This returns a list of defined known loggers. Used to support the LoggingConfigurable method get_logger_names. Parameters: a string containing the beginning of the names to be returned. Returns: A list of logger name strings Raises: Nothing ''' logkeys = logging.Logger.manager.loggerDict.keys() if startfilter: loggers = [] for l in logkeys: if l.startswith(startfilter): loggers.append(l) return loggers else: return logkeys #---------------------------------------------------------------------------- def doesLoggerExist(key_name): ''' This method determines if a logger exists for the given name. Parameters: name of the logger being queried Returns: True if named logger already exists. Raises: Nothing ''' return key_name in logging.Logger.manager.loggerDict #----------------------------------------------------------------------------
lgpl-2.1
-5,513,897,463,931,724,000
32.872928
116
0.54487
false
tcpcloud/contrail-controller
src/opserver/stats.py
1
6625
#!/usr/bin/python # # Copyright (c) 2013 Juniper Networks, Inc. All rights reserved. # # # stats # # Query StatsOracle info from analytics # import sys import os import argparse import json import datetime from opserver_util import OpServerUtils from sandesh_common.vns.ttypes import Module from sandesh_common.vns.constants import ModuleNames, NodeTypeNames import sandesh.viz.constants as VizConstants from pysandesh.gen_py.sandesh.ttypes import SandeshType, SandeshLevel class StatQuerier(object): def __init__(self): self._args = None # end __init__ # Public functions def run(self): topdir = '/usr/share/doc/contrail-docs/html/messages/' extn = '.json' stat_schema_files = [] for dirpath, dirnames, files in os.walk(topdir): for name in files: if name.lower().endswith(extn): stat_schema_files.append(os.path.join(dirpath, name)) stat_tables = [] for schema_file in stat_schema_files: with open(schema_file) as data_file: data = json.load(data_file) for _, tables in data.iteritems(): for table in tables: if table not in stat_tables: stat_tables.append(table) stat_table_list = [xx.stat_type + "." + xx.stat_attr for xx in VizConstants._STAT_TABLES] stat_table_list.extend([xx["stat_type"] + "." + xx["stat_attr"] for xx in stat_tables]) if self.parse_args(stat_table_list) != 0: return if len(self._args.select)==0 and self._args.dtable is None: tab_url = "http://" + self._args.analytics_api_ip + ":" +\ self._args.analytics_api_port +\ "/analytics/table/StatTable." + self._args.table schematxt = OpServerUtils.get_url_http(tab_url + "/schema", self._args.admin_user, self._args.admin_password) schema = json.loads(schematxt.text)['columns'] for pp in schema: if pp.has_key('suffixes') and pp['suffixes']: des = "%s %s" % (pp['name'],str(pp['suffixes'])) else: des = "%s" % pp['name'] if pp['index']: valuetxt = OpServerUtils.get_url_http( tab_url + "/column-values/" + pp['name'], self._args.admin_user, self._args.admin_password) print "%s : %s %s" % (des,pp['datatype'], valuetxt.text) else: print "%s : %s" % (des,pp['datatype']) else: result = self.query() self.display(result) def parse_args(self, stat_table_list): """ Eg. 
python stats.py --analytics-api-ip 127.0.0.1 --analytics-api-port 8181 --table AnalyticsCpuState.cpu_info --where name=a6s40 cpu_info.module_id=Collector --select "T=60 SUM(cpu_info.cpu_share)" --sort "SUM(cpu_info.cpu_share)" [--start-time now-10m --end-time now] | --last 10m python stats.py --table AnalyticsCpuState.cpu_info """ defaults = { 'analytics_api_ip': '127.0.0.1', 'analytics_api_port': '8181', 'start_time': 'now-10m', 'end_time': 'now', 'select' : [], 'where' : ['Source=*'], 'sort': [] } parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.set_defaults(**defaults) parser.add_argument("--analytics-api-ip", help="IP address of Analytics API Server") parser.add_argument("--analytics-api-port", help="Port of Analytcis API Server") parser.add_argument( "--start-time", help="Logs start time (format now-10m, now-1h)") parser.add_argument("--end-time", help="Logs end time") parser.add_argument( "--last", help="Logs from last time period (format 10m, 1d)") parser.add_argument( "--table", help="StatTable to query", choices=stat_table_list) parser.add_argument( "--dtable", help="Dynamic StatTable to query") parser.add_argument( "--select", help="List of Select Terms", nargs='+') parser.add_argument( "--where", help="List of Where Terms to be ANDed", nargs='+') parser.add_argument( "--sort", help="List of Sort Terms", nargs='+') parser.add_argument( "--admin-user", help="Name of admin user", default="admin") parser.add_argument( "--admin-password", help="Password of admin user", default="contrail123") self._args = parser.parse_args() if self._args.table is None and self._args.dtable is None: return -1 try: self._start_time, self._end_time = \ OpServerUtils.parse_start_end_time( start_time = self._args.start_time, end_time = self._args.end_time, last = self._args.last) except: return -1 return 0 # end parse_args # Public functions def query(self): query_url = OpServerUtils.opserver_query_url( self._args.analytics_api_ip, self._args.analytics_api_port) if self._args.dtable is not None: rtable = self._args.dtable else: rtable = self._args.table query_dict = OpServerUtils.get_query_dict( "StatTable." + rtable, str(self._start_time), str(self._end_time), select_fields = self._args.select, where_clause = "AND".join(self._args.where), sort_fields = self._args.sort) print json.dumps(query_dict) resp = OpServerUtils.post_url_http( query_url, json.dumps(query_dict), self._args.admin_user, self._args.admin_password, sync = True) res = None if resp is not None: res = json.loads(resp) res = res['value'] return res # end query def display(self, result): if result == [] or result is None: return for res in result: print res # end display # end class StatQuerier def main(): querier = StatQuerier() querier.run() # end main if __name__ == "__main__": main()
apache-2.0
-4,580,797,401,921,861,600
34.427807
97
0.535245
false
Bryukh-Checkio-Tasks/checkio-task-area-convex-polygon
verification/tests.py
1
2154
""" TESTS is a dict with all you tests. Keys for this will be categories' names. Each test is dict with "input" -- input data for user function "answer" -- your right answer "explanation" -- not necessary key, it's using for additional info in animation. """ TESTS = { "Basics": [ {"input": [[1, 1], [9, 9], [9, 1]], "answer": 32.0}, {"input": [[4, 10], [7, 1], [1, 4]], "answer": 22.5}, {"input": [[1, 2], [3, 8], [9, 8], [7, 1]], "answer": 40.0}, {"input": [[3, 3], [2, 7], [5, 9], [8, 7], [7, 3]], "answer": 26.0}, {"input": [[7, 2], [3, 2], [1, 5], [3, 9], [7, 9], [9, 6]], "answer": 42.0}, {"input": [[4, 1], [3, 4], [3, 7], [4, 8], [7, 9], [9, 6], [7, 1]], "answer": 35.5}, {"input": [[0, 0], [10, 9], [9, 10]], "answer": 9.5}, {"input": [[3, 3], [2, 10], [10, 10], [10, 1]], "answer": 59.5}, {"input": [[3, 1], [2, 8], [4, 9], [6, 8], [9, 1]], "answer": 37.0}, {"input": [[2, 2], [2, 7], [3, 8], [7, 8], [8, 7], [5, 4]], "answer": 21.5}, {"input": [[4, 8], [2, 6], [2, 4], [4, 2], [6, 2], [8, 4], [8, 6], [6, 8]], "answer": 28.0}, {"input": [[1, 1], [1, 2], [2, 2], [2, 1]], "answer": 1.0} ], "Extra": [ {"input": [[9, 1], [9, 9], [2, 2]], "answer": 28.0}, {"input": [[1, 4], [7, 1], [3, 10]], "answer": 21.0}, {"input": [[7, 1], [9, 8], [2, 8], [1, 2]], "answer": 43.0}, {"input": [[7, 3], [8, 7], [5, 9], [2, 6], [3, 3]], "answer": 25.0}, {"input": [[9, 6], [7, 9], [3, 9], [2, 5], [3, 2], [7, 2]], "answer": 38.5}, {"input": [[7, 1], [9, 6], [7, 9], [4, 10], [3, 8], [3, 4], [4, 1]], "answer": 40.0}, {"input": [[8, 10], [10, 9], [0, 0]], "answer": 14.0}, {"input": [[10, 1], [10, 10], [1, 10], [3, 3]], "answer": 63.0}, {"input": [[9, 1], [6, 8], [4, 9], [3, 8], [3, 1]], "answer": 33.0}, {"input": [[5, 4], [8, 7], [7, 8], [3, 8], [2, 7], [2, 2]], "answer": 21.5}, {"input": [[6, 8], [8, 6], [8, 4], [6, 2], [4, 2], [2, 4], [2, 7], [4, 8]], "answer": 29.0}, {"input": [[2, 1], [2, 2], [1, 3], [1, 1]], "answer": 1.5} ] }
gpl-2.0
-4,384,417,601,549,166,600
54.230769
100
0.364438
false
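The record above carries only the test fixtures for the convex-polygon area task, not a solution. The area the fixtures expect is the one given by the shoelace formula; a minimal sketch follows (the function name checkio and the rounding to one decimal place are assumptions for illustration, not taken from the record):

def checkio(vertices):
    # Shoelace formula over the vertex list; sufficient for the convex polygons in the fixtures.
    total = 0.0
    n = len(vertices)
    for i in range(n):
        x1, y1 = vertices[i]
        x2, y2 = vertices[(i + 1) % n]  # wrap around to close the polygon
        total += x1 * y2 - x2 * y1
    return round(abs(total) / 2.0, 1)

# Spot checks against two fixtures listed in the record:
assert checkio([[1, 1], [9, 9], [9, 1]]) == 32.0
assert checkio([[4, 10], [7, 1], [1, 4]]) == 22.5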
shepilov-vladislav/Flask-Restless-Restangular-with-JWT-auth
server/models.py
1
3081
# -*- coding: utf-8 -*- from __future__ import absolute_import from flask.ext.security import UserMixin, RoleMixin from flask.ext.sqlalchemy import SQLAlchemy from savalidation import ValidationMixin from flask.ext.security.utils import encrypt_password, verify_password from datetime import datetime from .errors_handlers import CheckError db = SQLAlchemy() roles_users = db.Table( 'roles_users', db.Column('user_id', db.Integer(), db.ForeignKey('user.id')), db.Column('role_id', db.Integer(), db.ForeignKey('role.id')), ) class Role(db.Model, RoleMixin): id = db.Column(db.Integer(), primary_key=True) name = db.Column(db.String(80), unique=True) description = db.Column(db.String(255)) class User(db.Model, UserMixin, ValidationMixin): id = db.Column(db.Integer, primary_key=True) email = db.Column(db.String(255), unique=True) username = db.Column(db.String(50), unique=True) password = db.Column(db.String(255)) active = db.Column(db.Boolean(), default=False) confirmed_at = db.Column(db.DateTime()) roles = db.relationship('Role', secondary=roles_users, backref=db.backref('users', lazy='dynamic')) def set_password(self, password): self.password = encrypt_password(password) def check_password(self, password): return verify_password(password, self.password) def check_mail_for_uniqueness(self, new_email): if self.query.filter_by(email=new_email).first() is None: result = True else: result = False return result def check_unique_username(self, new_username): if self.query.filter_by(username=new_username).first() is None: result = True else: result = False return result def import_data(self, data): try: self.email = data['email'] except KeyError as key_err: raise CheckError('Invalid user: missing ' + key_err.args[0]) try: self.username = data['username'] except KeyError as key_err: raise CheckError('Invalid username: missing ' + key_err.args[0]) try: self.password = data['password'] except KeyError as key_err: raise CheckError('Invalid password: missing ' + key_err.args[0]) return self class Article(db.Model): __tablename__ = 'article' id = db.Column(db.Integer, primary_key=True) title = db.Column(db.String(120), nullable=False) slug = db.Column(db.String(120)) text = db.Column(db.Text, nullable=False) author = db.Column(db.Integer, db.ForeignKey('user.id')) created_at = db.Column(db.DateTime, default=db.func.now()) def __init__(self, title, text, author): self.title = title self.text = text self.author = author self.created_at = datetime.utcnow() def author_username(self): return unicode(User.query.filter_by(id=self.author).first().username) def __repr__(self): return '<Article {}>'.format(self.title)
mit
-8,428,511,970,630,879,000
32.129032
77
0.636806
false
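The models module above defines password helpers and a small import_data validator but ships no example of how they are meant to be called together. A rough usage sketch, assuming the SQLAlchemy db object is already bound to a configured Flask app with Flask-Security set up (the import paths and the register_user helper are illustrative, not part of the record):

from server.models import db, User
from server.errors_handlers import CheckError

def register_user(payload):
    user = User()
    try:
        user.import_data(payload)  # raises CheckError when email/username/password is missing
    except CheckError as exc:
        return None, str(exc)
    if not user.check_mail_for_uniqueness(user.email):
        return None, 'email already in use'
    if not user.check_unique_username(user.username):
        return None, 'username already in use'
    user.set_password(user.password)  # replace the plain-text password with its hash
    db.session.add(user)
    db.session.commit()
    return user, None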
8l/beri
cheritest/trunk/tests/mem/test_raw_ldr.py
2
2319
#- # Copyright (c) 2011 William M. Morland # All rights reserved. # # This software was developed by SRI International and the University of # Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237 # ("CTSRD"), as part of the DARPA CRASH research programme. # # @BERI_LICENSE_HEADER_START@ # # Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor # license agreements. See the NOTICE file distributed with this work for # additional information regarding copyright ownership. BERI licenses this # file to you under the BERI Hardware-Software License, Version 1.0 (the # "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at: # # http://www.beri-open-systems.org/legal/license-1-0.txt # # Unless required by applicable law or agreed to in writing, Work distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. # # @BERI_LICENSE_HEADER_END@ # from beritest_tools import BaseBERITestCase class test_raw_ldr(BaseBERITestCase): def test_offset_zero(self): self.assertRegisterEqual(self.MIPS.a1, 0xb7b6b5b4b3b2b1fe, "LDR with zero offset failed") def test_offset_one(self): self.assertRegisterEqual(self.MIPS.a2, 0xb7b6b5b4b3b2fedc, "LDR with one offset failed") def test_offset_two(self): self.assertRegisterEqual(self.MIPS.a3, 0xb7b6b5b4b3fedcba, "LDR with two offset failed") def test_offset_three(self): self.assertRegisterEqual(self.MIPS.a4, 0xb7b6b5b4fedcba98, "LDR with three offset failed") def test_offset_four(self): self.assertRegisterEqual(self.MIPS.a5, 0xb7b6b5fedcba9876, "LDR with four offset failed") def test_offset_five(self): self.assertRegisterEqual(self.MIPS.a6, 0xb7b6fedcba987654, "LDR with five offset failed") def test_offset_six(self): self.assertRegisterEqual(self.MIPS.a7, 0xb7fedcba98765432, "LDR with six offset failed") def test_offset_seven(self): self.assertRegisterEqual(self.MIPS.t0, 0xfedcba9876543210, "LDR with seven offset failed") def test_offset_eight(self): self.assertRegisterEqual(self.MIPS.t1, 0xb7b6b5b4b3b2b1ff, "LDR with eight offset failed")
apache-2.0
-2,268,119,442,368,552,400
40.410714
92
0.773609
false
pombredanne/pytype
pytype/tests/test_inheritance.py
1
3928
"""Tests for classes, MROs, inheritance etc.""" import unittest from pytype.pytd import pytd from pytype.tests import test_inference class InheritanceTest(test_inference.InferenceTest): """Tests for class inheritance.""" @unittest.skip("needs (re-)analyzing methods on subclasses") def testSubclassAttributes(self): with self.Infer(""" class Base(object): def get_lineno(self): return self.lineno class Leaf(Base): lineno = 0 """, deep=True, solve_unknowns=False, extract_locals=False) as ty: self.assertTypesMatchPytd(ty, """ class Base: pass class Leaf(Base): lineno: int def get_lineno(self) -> int """) def testClassAttributes(self): with self.Infer(""" class A(object): pass class B(A): pass A.x = 3 A.y = 3 B.x = "foo" def ax(): return A.x def bx(): return B.x def ay(): return A.y def by(): return A.y """, deep=True, solve_unknowns=False, extract_locals=False) as ty: self.assertOnlyHasReturnType(ty.Lookup("ax"), self.int) self.assertOnlyHasReturnType(ty.Lookup("bx"), self.str) self.assertOnlyHasReturnType(ty.Lookup("ay"), self.int) self.assertOnlyHasReturnType(ty.Lookup("by"), self.int) def testMultipleInheritance(self): with self.Infer(""" class A(object): x = 1 class B(A): y = 4 class C(A): y = "str" z = 3j class D(B, C): pass def x(): return D.x def y(): return D.y def z(): return D.z """, deep=True, solve_unknowns=False, extract_locals=False) as ty: self.assertOnlyHasReturnType(ty.Lookup("x"), self.int) self.assertOnlyHasReturnType(ty.Lookup("y"), self.int) self.assertOnlyHasReturnType(ty.Lookup("z"), self.complex) @unittest.skip("Needs type parameters on inherited classes.") def testInheritFromBuiltins(self): with self.Infer(""" class MyDict(dict): def __init__(self): dict.__setitem__(self, "abc", "foo") def f(): return NoCaseKeysDict() """, deep=False, solve_unknowns=False, extract_locals=False) as ty: mydict = ty.Lookup("MyDict") self.assertOnlyHasReturnType(ty.Lookup("f"), pytd.ClassType("MyDict", mydict)) def testInheritMethodsFromObject(self): # Test that even in the presence of multi-level inheritance, # we can still see attributes from "object". with self.Infer(""" class A(object): pass class B(A): pass def f(): return A().__sizeof__() def g(): return B().__sizeof__() def h(): return "bla".__sizeof__() f(); g(); h() """, deep=False, solve_unknowns=False, extract_locals=False) as ty: self.assertOnlyHasReturnType(ty.Lookup("f"), self.int) self.assertOnlyHasReturnType(ty.Lookup("g"), self.int) self.assertOnlyHasReturnType(ty.Lookup("h"), self.int) def testMRO(self): with self.Infer(""" class A(object): def a(self): return 1 class B(A): def b(self): return 1.0 class C(A): def b(self): # ignored in D, B.b has precedence return "foo" class D(B, C): pass def f(): return A().a() def g(): return B().b() def h(): return C().b() def i(): return D().b() """, deep=True, solve_unknowns=False, extract_locals=False) as ty: self.assertOnlyHasReturnType(ty.Lookup("f"), self.int) self.assertOnlyHasReturnType(ty.Lookup("g"), self.float) self.assertOnlyHasReturnType(ty.Lookup("h"), self.str) self.assertOnlyHasReturnType(ty.Lookup("i"), self.float) if __name__ == "__main__": test_inference.main()
apache-2.0
2,658,289,285,419,811,300
27.882353
71
0.570519
false
simone-campagna/rubik
testing/rubik_testing/tests/test_program/rubik_test_interface.py
1
13352
#!/usr/bin/env python3 # # Copyright 2014 Simone Campagna # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # __author__ = "Simone Campagna" __all__ = [ 'RubikTestInterface', ] import os import numpy as np from rubik.conf import VERSION from rubik.shape import Shape from rubik.application import logo from rubik.cubes import api as cb from ...rubik_test_case import testmethod from ...rubik_test_program import RubikTestProgram class RubikTestInterface(RubikTestProgram): METHOD_NAMES = [] @testmethod def help(self): returncode, output, error = self.run_program("--help") self.assertEqual(returncode, 0) @testmethod def usage(self): returncode, output, error = self.run_program("--usage") self.assertEqual(returncode, 0) @testmethod def logo(self): returncode, output, error = self.run_program("--logo") self.assertEqual(returncode, 0) self.assertEqual(output, "{}\n".format(logo.RUBIK)) @testmethod def version(self): returncode, output, error = self.run_program("--version") self.assertEqual(returncode, 0) self.assertEqual(output, "rubik {}\n".format(VERSION)) @testmethod def dry_run(self): returncode, output, error = self.run_program("-i non_existent.tmp1 -s 4x6 -o non_existent.tmp2 --dry-run") self.assertEqual(returncode, 0) @testmethod def report_dry_run(self): returncode, output, error = self.run_program("-i non_existent.tmp1 -s 4x6 -o non_existent.tmp2 --dry-run --report") self.assertEqual(returncode, 0) @testmethod def histogram_number(self): returncode, output, error = self.run_program("-e 'cb.random_cube((4, 5))' --histogram") self.assertEqual(returncode, 0) @testmethod def histogram_percentage(self): returncode, output, error = self.run_program("-e 'cb.random_cube((4, 5))' --histogram --histogram-percentage") self.assertEqual(returncode, 0) @testmethod def histogram_bins_8(self): returncode, output, error = self.run_program("-e 'cb.random_cube((4, 5))' --histogram --histogram-bins=8 --histogram-range 0.1 0.9") self.assertEqual(returncode, 0) @testmethod def help_expression(self): returncode, output, error = self.run_program("--help-expression") self.assertEqual(returncode, 0) @testmethod def help_extractor(self): returncode, output, error = self.run_program("--help-extractor") self.assertEqual(returncode, 0) @testmethod def help_user_defined_variables(self): returncode, output, error = self.run_program("--help-user-defined-variables") self.assertEqual(returncode, 0) # @testmethod # def help_numpy(self): # returncode, output, error = self.run_program("--help-numpy") # self.assertEqual(returncode, 0) # # @testmethod # def help_cubes(self): # returncode, output, error = self.run_program("--help-cubes") # self.assertEqual(returncode, 0) @testmethod def help_filenames(self): returncode, output, error = self.run_program("--help-filenames") self.assertEqual(returncode, 0) @testmethod def help_split(self): returncode, output, error = self.run_program("--help-split") self.assertEqual(returncode, 0) @testmethod def help_environment_variables(self): returncode, output, error = 
self.run_program("--help-environment-variables") self.assertEqual(returncode, 0) @testmethod def help_creating_cubes(self): returncode, output, error = self.run_program("--help-creating-cubes") self.assertEqual(returncode, 0) @testmethod def help_output(self): returncode, output, error = self.run_program("--help-output") self.assertEqual(returncode, 0) @testmethod def help_memory_usage(self): returncode, output, error = self.run_program("--help-memory-usage") self.assertEqual(returncode, 0) @testmethod def help_usage(self): returncode, output, error = self.run_program("--help-usage") self.assertEqual(returncode, 0) # labeled options def impl_labeled_options(self, shape, dtype, i0_label=None, i1_label=None, i2_label=None, o0_label=None, o1_label=None): shape = Shape(shape) dtype = cb.get_dtype(dtype) file_format = 'raw' i0_label_definition = '' i1_label_definition = '' i2_label_definition = '' o0_label_definition = '' o1_label_definition = '' if i0_label is None: i0_label = 'i0' else: i0_label_definition = '{}='.format(i0_label) if i1_label is None: i1_label = 'i1' else: i1_label_definition = '{}='.format(i1_label) if i2_label is None: i2_label = 'i2' else: i2_label_definition = '{}='.format(i2_label) if o0_label is None: o0_label = 'o0' else: o0_label_definition = '{}='.format(o0_label) if o1_label is None: o1_label = 'o1' else: o1_label_definition = '{}='.format(o1_label) lc_filename_format = "lcube_{shape}_{dtype}.{format}" lc_filename = lc_filename_format.format(shape=shape, dtype=dtype.__name__, format=file_format) returncode, output, error = self.run_program( """-e 'cb.linear_cube("{s}")' -o {lc}""".format( s=shape, lc=lc_filename_format, ) ) self.assertEqual(returncode, 0) self.assertFileExistsAndHasShape(lc_filename, shape=shape, dtype=dtype) rc_shape = Shape("100x{}".format(shape)) rc_extractor = "3," + ','.join(':' for d in shape) rc_filename_format = "rcube_{shape}_{dtype}.{format}" rc_filename = rc_filename_format.format(shape=rc_shape, dtype=dtype.__name__, format=file_format) returncode, output, error = self.run_program( """-e 'cb.random_cube("{s}")' -o {rc}""".format( s=rc_shape, rc=rc_filename_format, ) ) self.assertEqual(returncode, 0) cc_filename_format = "ccube_{shape}_{dtype}.{format}" cc_filename = cc_filename_format.format(shape=shape, dtype=dtype.__name__, format=file_format) returncode, output, error = self.run_program( """-e 'cb.const_cube("{s}", value=0.2)' -o {cc}""".format( s=shape, cc=cc_filename_format, ) ) self.assertEqual(returncode, 0) self.assertFileExistsAndHasShape(cc_filename, shape=shape, dtype=dtype) o0_filename_format = "o0cube_{shape}_{dtype}.{format}" o0_file_format = 'text' o0_filename = o0_filename_format.format(shape=shape, dtype=dtype.__name__, format=o0_file_format) o1_filename_format = "o1cube_{shape}_{dtype}.{format}" o1_file_format = 'csv' o1_filename = o1_filename_format.format(shape=shape, dtype=dtype.__name__, format=o1_file_format) command = """-i '{i0ld}{lc}' -i '{i1ld}{rc}' -i '{i2ld}{cc}' -s '{s}' -s '{i1l}={rs}' -x '{i1l}={rcx}' -e '{i0l} + {i1l}' -o '{o0ld}{o0}' -e '{i0l} - {i1l}' -o '{o1ld}{o1}' -Of '{o0l}={o0f}' -Of '{o1l}={o1f}'""".format( s=shape, rs=rc_shape, lc=lc_filename_format, rc=rc_filename_format, cc=cc_filename_format, o0=o0_filename_format, o0f=o0_file_format, o1=o1_filename_format, o1f=o1_file_format, rcx=rc_extractor, i0l=i0_label, i1l=i1_label, i2l=i2_label, o0l=o0_label, o1l=o1_label, i0ld=i0_label_definition, i1ld=i1_label_definition, i2ld=i2_label_definition, o0ld=o0_label_definition, o1ld=o1_label_definition, ) 
returncode, output, error = self.run_program(command) self.assertEqual(returncode, 0) self.assertFileExists(o0_filename) self.assertFileExists(o1_filename) self.remove_files(rc_filename, lc_filename, cc_filename, o0_filename, o1_filename) @testmethod def labeled_options_4x5_float32(self, shape="4x5", dtype="float32"): self.impl_labeled_options(shape=shape, dtype=dtype) @testmethod def labeled_options_4x5_float32_l_r_c_x_y(self, shape="4x5", dtype="float32"): self.impl_labeled_options(shape=shape, dtype=dtype, i0_label='l', i1_label='r', i2_label='c', o0_label='x', o1_label='y') def impl_expression_filename(self, shape, dtype, mode): shape = Shape(shape) dtype = cb.get_dtype(dtype) file_format = 'raw' out_filename_format = "outcube_{mode}_{{shape}}_{{dtype}}.{{format}}".format(mode=mode) out_filename = out_filename_format.format(shape=shape, dtype=dtype.__name__, format=file_format) expr_filename = "expr_{mode}.txt".format(mode=mode) with open(expr_filename, "w") as f_out: f_out.write("""\ cube = cb.linear_cube(shape="{s}", dtype="{d}") cb.write_cube(file_format="{f}", cube=cube, file="{o}") """.format(s=shape, d=dtype.__name__, o=out_filename_format, f=file_format)) if mode == "f_option": command = "-f {e}".format(e=expr_filename) else: command = "-e '@{e}'".format(e=expr_filename) returncode, output, error = self.run_program(command) self.assertEqual(returncode, 0) self.assertFileExistsAndHasShape(out_filename, shape=shape, dtype=dtype) @testmethod def expression_filename_4x5_float32_f_option(self): self.impl_expression_filename(shape="4x5", dtype="float64", mode="f_option") @testmethod def expression_filename_8x3x2_float32_f_option(self): self.impl_expression_filename(shape="8x3x2", dtype="float64", mode="f_option") @testmethod def expression_filename_4x5_float32_at_option(self): self.impl_expression_filename(shape="4x5", dtype="float64", mode="at_option") @testmethod def expression_filename_8x3x2_float32_at_option(self): self.impl_expression_filename(shape="8x3x2", dtype="float64", mode="at_option") # view attributes def impl_view_attribute(self, attribute_name, attribute_value): returncode, output, error = self.run_program("--view-attribute {}={!r}".format(attribute_name, attribute_value)) self.assertEqual(returncode, 0) @testmethod def view_attribute_clip_symmetric(self): self.impl_view_attribute("clip_symmetric", "True") @testmethod def view_attribute_x(self): self.impl_view_attribute("x", "0.33") # view attribute files def impl_view_attributes(self, **attribute_dict): filename = "view_attributes.txt" try: with open(filename, "w") as f_out: for attribute_name, attribute_value in attribute_dict.items(): f_out.write("{}={!r}\n".format(attribute_name, attribute_value)) returncode, output, error = self.run_program("--view-attribute-file {}".format(filename)) self.assertEqual(returncode, 0) finally: os.remove(filename) @testmethod def view_attribute_file(self): self.impl_view_attributes(clip_min=0.3, clip_symmetric=True, y=1.2) # view list @testmethod def view_attribute_list(self): returncode, output, error = self.run_program("--view-list") ## interface expressions @testmethod def read_cube(self): a = np.array([[1.0, -1.3], [1.3, -0.2]], dtype=cb.get_default_dtype()) a.tofile("file_a.raw") returncode, output, error = self.run_program("""-e 'read_cube(filename="file_a.raw", shape=("{s}"), dtype="{t!r}")' '_r.sum()' --print""".format( s=Shape(a.shape), t=cb.get_dtype_name(a.dtype), )) self.assertEqual(returncode, 0) v = float(output.strip()) self.assertAlmostEqual(v, a.sum()) @testmethod 
def write_cube(self): returncode, output, error = self.run_program("""-e '_r = cb.as_dtype(np.array([[1.0, -1.3], [1.3, -0.2]]))' -e 'write_cube(filename="file_b.raw", cube=_r)'""") self.assertEqual(returncode, 0) self.assertFileExistsAndHasShape("file_b.raw", Shape("2x2")) @testmethod def write_cube_default(self): returncode, output, error = self.run_program("""-e '_r = cb.as_dtype(np.array([[1.0, -1.3], [1.3, -0.2]]))' -e 'write_cube(filename="file_c.raw")'""") self.assertEqual(returncode, 0) self.assertFileExistsAndHasShape("file_c.raw", Shape("2x2"))
apache-2.0
-1,450,249,721,676,834,000
37.039886
227
0.607475
false
nds/bio_assembly_refinement
bio_assembly_refinement/contig_overlap_trimmer.py
1
6984
''' Class to find and trim overlapping ends of a contig Attributes: ----------- fasta_file : input fasta file working_directory : path to working directory (default to current working directory) contigs : dict of contigs (instead of fasta file) trim : trim overlaps (default true) trim_reversed_overlaps: trims overlaps even if reversed (default false) alignments : pre-computed alignments (if available from previous step) overlap_offset: offset from edge that the overlap can start (default 1000) overlap_boundary_max : max boundary of overlap expressed as % of length of reference (default 50) overlap_min_length : minimum length of overlap (default 1KB) overlap_max_length : maximum length of overlap (default 3KB) overlap_percent_identity : percent identity of match between ends (default 85) min_trim_length : minimum trimmed length of contig over total contig length (default 0.8) summary_file : summary file (default contig_overlap_summary.txt) summary_prefix : prefix for lines in summary file debug : do not delete temp files if set to true (default false) Sample usage: ------------- ''' import os import re from pyfastaq import tasks, sequences from pyfastaq import utils as fastaqutils from pymummer import alignment from bio_assembly_refinement import utils class ContigOverlapTrimmer: def __init__(self, fasta_file='', working_directory=None, contigs={}, alignments=[], trim = True, trim_reversed_overlaps = False, overlap_offset=1000, overlap_boundary_max=50, overlap_min_length=1000, overlap_max_length=3000, overlap_percent_identity=85, min_trim_length=0.89, skip = None, summary_file = "contig_trimming_summary.txt", summary_prefix = '[contig trimmer]', debug=False): ''' Constructor ''' self.fasta_file = fasta_file self.working_directory = working_directory if working_directory else os.getcwd() self.contigs = contigs self.alignments = alignments self.trim = trim self.trim_reversed_overlaps = trim_reversed_overlaps self.overlap_offset = overlap_offset self.overlap_boundary_max = overlap_boundary_max * 0.01 self.overlap_min_length = overlap_min_length self.overlap_max_length = overlap_max_length self.overlap_percent_identity = overlap_percent_identity self.min_trim_length = min_trim_length self.ids_to_skip = utils.parse_file_or_set(skip) self.summary_file = summary_file self.summary_prefix = summary_prefix self.output_file = self._build_final_filename() self.debug = debug # Extract contigs if not self.contigs: self.contigs = {} tasks.file_to_dict(self.fasta_file, self.contigs) def _find_best_overlap(self, contig_id): ''' Look for the (best) overlap''' best_overlap = None boundary = self.overlap_boundary_max * len(self.contigs[contig_id]) for algn in self.alignments: if algn.qry_name == contig_id and \ algn.ref_name == contig_id and \ algn.ref_start < self.overlap_offset and \ algn.ref_end < boundary and \ algn.qry_start > boundary and \ algn.qry_end > (algn.qry_length - self.overlap_offset) and \ algn.hit_length_ref >= self.overlap_min_length and \ algn.hit_length_ref <= self.overlap_max_length and \ algn.percent_identity > self.overlap_percent_identity: if not best_overlap or \ (algn.ref_start <= best_overlap.ref_start and \ algn.qry_end > best_overlap.qry_end ): best_overlap = algn return best_overlap def _trim(self, contig_id, best_overlap): ''' trim overlap off the start of contig ''' original_sequence = self.contigs[contig_id] trim_start = best_overlap.ref_end+1 trim_end = best_overlap.qry_end+1 trim_status = '' if not best_overlap.on_same_strand(): if not self.trim_reversed_overlaps: 
trim_status = "overlap reversed, not trimming" return trim_status else: trim_start = min(best_overlap.ref_start, best_overlap.ref_end) + 1 trim_end = max(best_overlap.qry_start, best_overlap.qry_end) + 1 trim_status = "overlap reversed, trimming" trimmed_sequence = original_sequence[trim_start:trim_end] if(len(trimmed_sequence)/len(original_sequence) < self.min_trim_length): trim_status = "trimmed length would be too short, not trimming" return trim_status else: self.contigs[contig_id].seq = trimmed_sequence trim_status = "trimmed length " + str(len(trimmed_sequence)) return trim_status def _write_summary(self, contig_id, best_overlap, trim_status): '''Write summary''' if (not os.path.exists(self.summary_file)) or os.stat(self.summary_file).st_size == 0: header = '\t'.join([self.summary_prefix, 'id', 'overlap length', 'overlap location', 'trim status']) +'\n' utils.write_text_to_file(header, self.summary_file) overlap_length = '-' overlap_location = '-' if best_overlap: overlap_length = str(best_overlap.hit_length_ref) overlap_location = str(best_overlap.ref_start) + ',' + str(best_overlap.ref_end) + '-' + \ str(best_overlap.qry_start) + ',' + str(best_overlap.qry_end) else: trim_status = "no suitable overlap found" line = "\t".join([self.summary_prefix, contig_id, overlap_length, overlap_location, trim_status]) + "\n" utils.write_text_to_file(line, self.summary_file) def _build_alignments_filename(self): return os.path.join(self.working_directory, "nucmer_all_contigs.coords") def _build_final_filename(self): input_filename = os.path.basename(self.fasta_file) return os.path.join(self.working_directory, "trimmed_" + input_filename) def _build_intermediate_filename(self): input_filename = os.path.basename(self.fasta_file) return os.path.join(self.working_directory, "unsorted_trimmed_" + input_filename) def run(self): original_dir = os.getcwd() os.chdir(self.working_directory) contigs_in_file = set(self.contigs.keys()) if contigs_in_file != self.ids_to_skip and not self.alignments: self.alignments = utils.run_nucmer(self.fasta_file, self.fasta_file, self._build_alignments_filename(), min_percent_id=self.overlap_percent_identity) output_fw = fastaqutils.open_file_write(self.output_file) for contig_id in sorted(self.contigs.keys()): #Look for overlaps, trim if applicable if contig_id not in self.ids_to_skip: best_overlap = self._find_best_overlap(contig_id) trim_status = None if best_overlap and self.trim: trim_status = self._trim(contig_id, best_overlap) self._write_summary(contig_id, best_overlap, trim_status) print(sequences.Fasta(contig_id, self.contigs[contig_id].seq), file=output_fw) fastaqutils.close(output_fw) # tasks.sort_by_size(self._build_intermediate_filename(), self.output_file) # Sort contigs in final file according to size if not self.debug: utils.delete(self._build_alignments_filename()) # utils.delete(self._build_intermediate_filename()) os.chdir(original_dir)
gpl-3.0
-150,525,826,035,892,580
37.379121
152
0.70146
false
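The class docstring above leaves its "Sample usage" section empty. A minimal, hypothetical invocation built only from the constructor arguments and run() method shown in the record (the FASTA file name and working directory are placeholders):

from bio_assembly_refinement.contig_overlap_trimmer import ContigOverlapTrimmer

trimmer = ContigOverlapTrimmer(
    fasta_file="contigs.fa",            # any assembly FASTA with possibly overlapping contig ends
    working_directory="/tmp/trim_run",  # defaults to the current directory when omitted
    overlap_min_length=1000,            # the defaults documented in the constructor
    overlap_max_length=3000,
    overlap_percent_identity=85,
)
trimmer.run()
# Expected outputs in the working directory: trimmed_contigs.fa and contig_trimming_summary.txt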
jucimarjr/IPC_2017-1
lista07/lista07_lista02_questao08.py
1
1156
#---------------------------------------------------------------------------------------------------------------------- # Introdução a Programação de Computadores - IPC # Universidade do Estado do Amazonas - UEA # Prof. Jucimar Jr # Alexandre Marques Uchôa 1715310028 # Carlos Eduardo Tapudima de Oliveira 1715310030 # Gabriel de Queiroz Sousa 1715310044 # Lucas Gabriel Silveira Duarte 1715310053 # Natália Cavalcantre Xavier 1715310021 #(a) (MAT 83) Imprimir as n primeiras linhas do triângulo de Pascal (2). #1 #1 1 #1 2 1 #1 3 3 1 #1 4 6 4 1 #1 5 10 10 5 1 #: #(b) Imprimir as n primeiras linhas do triângulo de Pascal usando apenas um vetor. matriz = [] n = int(input()) for i in range(n): lista = [] for j in range(n): lista.append(0) matriz.append(lista) for i in range(n): for j in range(n): if(i>=j): if(i==0 or i==j): matriz[i][j] = 1 else: matriz[i][j] = matriz[i-1][j-1] + matriz[i-1][j] for i in range(n): print(matriz[i])
apache-2.0
-2,453,688,487,153,752,600
23.978261
119
0.497387
false
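Part (b) of the exercise quoted in the record asks for the same triangle using only one vector, but the submitted code only covers the matrix version from part (a). A sketch of the single-list variant (added for illustration, not part of the original submission):

n = int(input())
row = [1]  # a single vector, rebuilt in place for every printed line
for _ in range(n):
    print(row)
    # next row: 1, the pairwise sums of the current row, then 1 again
    row = [1] + [row[j] + row[j + 1] for j in range(len(row) - 1)] + [1]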
rjschwei/azure-sdk-for-python
azure-mgmt-compute/azure/mgmt/compute/operations/disks_operations.py
1
30126
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.pipeline import ClientRawResponse from msrestazure.azure_exceptions import CloudError from msrestazure.azure_operation import AzureOperationPoller import uuid from .. import models class DisksOperations(object): """DisksOperations operations. :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An objec model deserializer. :ivar api_version: Client Api Version. Constant value: "2016-04-30-preview". """ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.api_version = "2016-04-30-preview" self.config = config def create_or_update( self, resource_group_name, disk_name, disk, custom_headers=None, raw=False, **operation_config): """Creates or updates a disk. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param disk_name: The name of the disk within the given subscription and resource group. :type disk_name: str :param disk: Disk object supplied in the body of the Put disk operation. :type disk: :class:`Disk <azure.mgmt.compute.models.Disk>` :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :rtype: :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>` instance that returns :class:`Disk <azure.mgmt.compute.models.Disk>` :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if raw=true :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}' path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'diskName': self._serialize.url("disk_name", disk_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct body body_content = self._serialize.body(disk, 'Disk') # Construct and send request def long_running_send(): request = self._client.put(url, query_parameters) return self._client.send( request, header_parameters, body_content, **operation_config) def get_long_running_status(status_link, headers=None): 
request = self._client.get(status_link) if headers: request.headers.update(headers) return self._client.send( request, header_parameters, **operation_config) def get_long_running_output(response): if response.status_code not in [200, 202]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('Disk', response) if response.status_code == 202: deserialized = self._deserialize('Disk', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized if raw: response = long_running_send() return get_long_running_output(response) long_running_operation_timeout = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) return AzureOperationPoller( long_running_send, get_long_running_output, get_long_running_status, long_running_operation_timeout) def update( self, resource_group_name, disk_name, disk, custom_headers=None, raw=False, **operation_config): """Updates (patches) a disk. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param disk_name: The name of the disk within the given subscription and resource group. :type disk_name: str :param disk: Disk object supplied in the body of the Patch disk operation. :type disk: :class:`DiskUpdate <azure.mgmt.compute.models.DiskUpdate>` :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :rtype: :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>` instance that returns :class:`Disk <azure.mgmt.compute.models.Disk>` :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if raw=true :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}' path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'diskName': self._serialize.url("disk_name", disk_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct body body_content = self._serialize.body(disk, 'DiskUpdate') # Construct and send request def long_running_send(): request = self._client.patch(url, query_parameters) return self._client.send( request, header_parameters, body_content, **operation_config) def get_long_running_status(status_link, headers=None): request = self._client.get(status_link) if headers: request.headers.update(headers) return self._client.send( request, header_parameters, **operation_config) def get_long_running_output(response): if 
response.status_code not in [200, 202]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('Disk', response) if response.status_code == 202: deserialized = self._deserialize('Disk', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized if raw: response = long_running_send() return get_long_running_output(response) long_running_operation_timeout = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) return AzureOperationPoller( long_running_send, get_long_running_output, get_long_running_status, long_running_operation_timeout) def get( self, resource_group_name, disk_name, custom_headers=None, raw=False, **operation_config): """Gets information about a disk. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param disk_name: The name of the disk within the given subscription and resource group. :type disk_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :rtype: :class:`Disk <azure.mgmt.compute.models.Disk>` :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if raw=true :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}' path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'diskName': self._serialize.url("disk_name", disk_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters) response = self._client.send(request, header_parameters, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('Disk', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def delete( self, resource_group_name, disk_name, custom_headers=None, raw=False, **operation_config): """Deletes a disk. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param disk_name: The name of the disk within the given subscription and resource group. 
:type disk_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :rtype: :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>` instance that returns :class:`OperationStatusResponse <azure.mgmt.compute.models.OperationStatusResponse>` :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if raw=true :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}' path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'diskName': self._serialize.url("disk_name", disk_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request def long_running_send(): request = self._client.delete(url, query_parameters) return self._client.send(request, header_parameters, **operation_config) def get_long_running_status(status_link, headers=None): request = self._client.get(status_link) if headers: request.headers.update(headers) return self._client.send( request, header_parameters, **operation_config) def get_long_running_output(response): if response.status_code not in [200, 202, 204]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('OperationStatusResponse', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized if raw: response = long_running_send() return get_long_running_output(response) long_running_operation_timeout = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) return AzureOperationPoller( long_running_send, get_long_running_output, get_long_running_status, long_running_operation_timeout) def list_by_resource_group( self, resource_group_name, custom_headers=None, raw=False, **operation_config): """Lists all the disks under a resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:rtype: :class:`DiskPaged <azure.mgmt.compute.models.DiskPaged>` :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks' path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters) response = self._client.send( request, header_parameters, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response # Deserialize response deserialized = models.DiskPaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.DiskPaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized def list( self, custom_headers=None, raw=False, **operation_config): """Lists all the disks under a subscription. :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:rtype: :class:`DiskPaged <azure.mgmt.compute.models.DiskPaged>` :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks' path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters) response = self._client.send( request, header_parameters, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response # Deserialize response deserialized = models.DiskPaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.DiskPaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized def grant_access( self, resource_group_name, disk_name, access, duration_in_seconds, custom_headers=None, raw=False, **operation_config): """Grants access to a disk. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param disk_name: The name of the disk within the given subscription and resource group. :type disk_name: str :param access: Possible values include: 'None', 'Read' :type access: str or :class:`AccessLevel <azure.mgmt.compute.models.AccessLevel>` :param duration_in_seconds: Time duration in seconds until the SAS access expires. 
:type duration_in_seconds: int :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :rtype: :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>` instance that returns :class:`AccessUri <azure.mgmt.compute.models.AccessUri>` :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if raw=true :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ grant_access_data = models.GrantAccessData(access=access, duration_in_seconds=duration_in_seconds) # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess' path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'diskName': self._serialize.url("disk_name", disk_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct body body_content = self._serialize.body(grant_access_data, 'GrantAccessData') # Construct and send request def long_running_send(): request = self._client.post(url, query_parameters) return self._client.send( request, header_parameters, body_content, **operation_config) def get_long_running_status(status_link, headers=None): request = self._client.get(status_link) if headers: request.headers.update(headers) return self._client.send( request, header_parameters, **operation_config) def get_long_running_output(response): if response.status_code not in [200, 202]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('AccessUri', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized if raw: response = long_running_send() return get_long_running_output(response) long_running_operation_timeout = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) return AzureOperationPoller( long_running_send, get_long_running_output, get_long_running_status, long_running_operation_timeout) def revoke_access( self, resource_group_name, disk_name, custom_headers=None, raw=False, **operation_config): """Revokes access to a disk. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param disk_name: The name of the disk within the given subscription and resource group. 
:type disk_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :rtype: :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>` instance that returns :class:`OperationStatusResponse <azure.mgmt.compute.models.OperationStatusResponse>` :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if raw=true :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess' path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'diskName': self._serialize.url("disk_name", disk_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request def long_running_send(): request = self._client.post(url, query_parameters) return self._client.send(request, header_parameters, **operation_config) def get_long_running_status(status_link, headers=None): request = self._client.get(status_link) if headers: request.headers.update(headers) return self._client.send( request, header_parameters, **operation_config) def get_long_running_output(response): if response.status_code not in [200, 202]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('OperationStatusResponse', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized if raw: response = long_running_send() return get_long_running_output(response) long_running_operation_timeout = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) return AzureOperationPoller( long_running_send, get_long_running_output, get_long_running_status, long_running_operation_timeout)
mit
-5,214,372,820,157,999,000
43.04386
144
0.627631
false
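DisksOperations above is the low-level, generated surface; callers normally reach it through the disks attribute of a ComputeManagementClient. A rough sketch, assuming credentials and a subscription id are already available from your own auth setup (the resource group and disk names are placeholders, and only methods shown in the record are used):

from azure.mgmt.compute import ComputeManagementClient

client = ComputeManagementClient(credentials, subscription_id)

# list_by_resource_group returns a paged iterator of Disk models.
for disk in client.disks.list_by_resource_group('my-resource-group'):
    print(disk.name, disk.disk_size_gb)

# grant_access / revoke_access are long-running operations: result() blocks on the poller.
poller = client.disks.grant_access('my-resource-group', 'my-disk',
                                   access='Read', duration_in_seconds=3600)
access_uri = poller.result()  # the AccessUri described in the method's docstring
client.disks.revoke_access('my-resource-group', 'my-disk').result()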
bitmazk/cmsplugin-accordion
setup.py
1
1844
# -*- encoding: utf-8 -*- """ Python setup file for the cmsplugin_accordion app. In order to register your app at pypi.python.org, create an account at pypi.python.org and login, then register your new app like so: python setup.py register If your name is still free, you can now make your first release but first you should check if you are uploading the correct files: python setup.py sdist Inspect the output thoroughly. There shouldn't be any temp files and if your app includes staticfiles or templates, make sure that they appear in the list. If something is wrong, you need to edit MANIFEST.in and run the command again. If all looks good, you can make your first release: python setup.py sdist upload For new releases, you need to bump the version number in cmsplugin_accordion/__init__.py and re-run the above command. For more information on creating source distributions, see http://docs.python.org/2/distutils/sourcedist.html """ import os from setuptools import setup, find_packages import cmsplugin_accordion as app dev_requires = [ 'flake8', ] install_requires = [ 'django', ] def read(fname): try: return open(os.path.join(os.path.dirname(__file__), fname)).read() except IOError: return '' setup( name="cmsplugin-accordion", version=app.__version__, description=read('DESCRIPTION'), long_description=read('README.rst'), license='The MIT License', platforms=['OS Independent'], keywords='django, app, reusable, django-cms, cmsplugin, plugin, accordion', author='Martin Brochhaus', author_email='[email protected]', url="https://github.com/bitmazk/cmsplugin-accordion", packages=find_packages(), include_package_data=True, install_requires=install_requires, extras_require={ 'dev': dev_requires, }, )
mit
7,873,863,813,639,560,000
26.522388
79
0.713124
false
daveoncode/python-string-utils
tests/test_is_palindrome.py
1
1571
from unittest import TestCase

from string_utils import is_palindrome


class IsPalindromeTestCase(TestCase):
    def test_non_string_objects_return_false(self):
        # noinspection PyTypeChecker
        self.assertFalse(is_palindrome(1))
        # noinspection PyTypeChecker
        self.assertFalse(is_palindrome(['xx']))
        # noinspection PyTypeChecker
        self.assertFalse(is_palindrome({}))
        # noinspection PyTypeChecker
        self.assertFalse(is_palindrome(False))
        # noinspection PyTypeChecker
        self.assertFalse(is_palindrome((1, 2, 3)))
        # noinspection PyTypeChecker
        self.assertFalse(is_palindrome(object()))

    def test_empty_strings_are_not_palindromes(self):
        self.assertFalse(is_palindrome(''))
        self.assertFalse(is_palindrome(' '))
        self.assertFalse(is_palindrome('\n\t\n'))

    def test_returns_true_if_palindrome_with_default_options(self):
        self.assertTrue(is_palindrome('LOL'))
        self.assertTrue(is_palindrome('otto'))

    def test_returns_false_if_not_palindrome_with_default_options(self):
        self.assertFalse(is_palindrome('nope!'))
        self.assertFalse(is_palindrome('ROTFL'))

    def test_if_not_specified_case_matters(self):
        self.assertFalse(is_palindrome('Lol'))
        self.assertTrue(is_palindrome('Lol', ignore_case=True))

    def test_if_not_specified_spaces_matter(self):
        self.assertFalse(is_palindrome('i topi non avevano nipoti'))
        self.assertTrue(is_palindrome('i topi non avevano nipoti', ignore_spaces=True))
mit
2,184,163,038,154,132,500
33.911111
87
0.681095
false
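The record above ships only the test suite; the string_utils implementation itself is not included. A minimal sketch that satisfies the behaviour the tests pin down (non-strings and blank strings rejected, optional ignore_case / ignore_spaces flags) — an illustration, not the library's real code:

def is_palindrome(value, ignore_spaces=False, ignore_case=False):
    # Non-string input and empty/whitespace-only strings are never palindromes.
    if not isinstance(value, str) or not value.strip():
        return False
    if ignore_spaces:
        value = value.replace(' ', '')
    if ignore_case:
        value = value.lower()
    return value == value[::-1]

assert is_palindrome('otto') and not is_palindrome('nope!')
assert not is_palindrome('Lol') and is_palindrome('Lol', ignore_case=True)
assert is_palindrome('i topi non avevano nipoti', ignore_spaces=True)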
ptphp/PyLib
src/tornado/demos/lihuashu/test/serviceUpload.py
1
1648
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on Jul 11, 2012

@author: joseph
'''
import urllib,urllib2
import mimetypes


def uploadfile(fields, files):
    BOUNDARY = '----------267402204411258'
    CRLF = '\r\n'
    L = []
    for (key, value) in fields:
        L.append('--' + BOUNDARY)
        L.append('Content-Disposition: form-data; name="%s"' % key)
        L.append('')
        L.append(value)
    for (key, filename, value) in files:
        L.append('--' + BOUNDARY)
        L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
        # parenthesised so the octet-stream fallback applies when guess_type() returns None
        L.append('Content-Type: %s' % (mimetypes.guess_type(filename)[0] or 'application/octet-stream'))
        L.append('')
        L.append(value)
    L.append('--' + BOUNDARY + '--')
    L.append('')
    body = CRLF.join(L)
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    return content_type, body

if __name__ == '__main__':
    fields = [
        ('_xsrf', '28d55624808042768af23188e318500a')
    ]
    ifile = "/home/joseph/Pictures/1.jpg"
    imgdata = file(ifile, "rb")
    files = [
        ('ifile', imgdata.name, imgdata.read())
    ]
    content_type, upload_data = uploadfile(fields, files)
    uploadheader = {
        "User-Agent": "Mozilla/5.0 (Windows NT 5.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
        'Content-Type': content_type,
        'Content-Length': str(len(upload_data))
    }
    request = urllib2.Request("http://localhost/upload/", upload_data, uploadheader)
    res = urllib2.urlopen(request)
    print res.read()
apache-2.0
2,198,703,158,033,846,500
28.981818
104
0.555825
false
uber-common/opentracing-python
example/zipkin_like/zipkin_thrift/zipkin_collector.py
1
1445
# Copyright (c) 2015 Uber Technologies, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from __future__ import absolute_import, unicode_literals, print_function import os import sys from tchannel import thrift zipkin_collector = thrift.load( os.path.join(os.path.dirname(__file__), 'zipkinCore.thrift'), service='tcollector', ) # Replace this module with the generated module. sys.modules[__name__] = zipkin_collector
mit
5,431,431,284,988,255,000
41.5
79
0.768858
false
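The last statement of the module above, sys.modules[__name__] = zipkin_collector, makes any later import of that module name resolve directly to the thrift-generated module. A self-contained, hypothetical illustration of the same replacement trick (demo_module and its answer attribute are invented):

import sys

class _Stub(object):
    answer = 42

sys.modules['demo_module'] = _Stub()   # register an arbitrary object under a module name

import demo_module                     # the import machinery hands back the stub
assert demo_module.answer == 42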
cylc/cylc
cylc/flow/loggingutil.py
1
9067
# THIS FILE IS PART OF THE CYLC SUITE ENGINE. # Copyright (C) NIWA & British Crown (Met Office) & Contributors. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Logging utilities. This module provides: - A custom rolling file handler for suite logs with date-time names. - A formatter with ISO date time and indented multi-line messages. Note: The ISO date time bit is redundant in Python 3, because "time.strftime" will handle time zone from "localtime" properly. """ import os import re import sys import logging import textwrap from glob import glob from functools import partial from ansimarkup import parse as cparse from cylc.flow.wallclock import (get_current_time_string, get_time_string_from_unix_time) from cylc.flow.cfgspec.glbl_cfg import glbl_cfg from cylc.flow.pathutil import get_suite_run_log_name class CylcLogFormatter(logging.Formatter): """Format log record in standard Cylc way. Message in '%(asctime)s %(levelname)-2s - %(message)s' format. Indent continuation in multi-line messages. Date time in ISO date time with correct time zone. """ COLORS = { 'CRITICAL': cparse('<red><bold>{0}</bold></red>'), 'ERROR': cparse('<red>{0}</red>'), 'WARNING': cparse('<yellow>{0}</yellow>'), 'DEBUG': cparse('<fg #888888>{0}</fg #888888>') } # default hard-coded max width for log entries # NOTE: this should be sufficiently long that log entries read by the # deamonise script (url, pid) are not wrapped MAX_WIDTH = 999 def __init__(self, timestamp=True, color=False, max_width=None): self.timestamp = None self.color = None self.max_width = self.MAX_WIDTH self.wrapper = None self.configure(timestamp, color, max_width) # You may find adding %(filename)s %(lineno)d are useful when debugging logging.Formatter.__init__( self, '%(asctime)s %(levelname)-2s - %(message)s', '%Y-%m-%dT%H:%M:%S%Z') def configure(self, timestamp=None, color=None, max_width=None): """Reconfigure the format settings.""" if timestamp is not None: self.timestamp = timestamp if color is not None: self.color = color if max_width is not None: self.max_width = max_width if self.max_width is None: self.wrapper = lambda x: [x] else: self.wrapper = partial(textwrap.wrap, width=self.max_width) def format(self, record): """Indent continuation lines in multi-line messages.""" text = logging.Formatter.format(self, record) if not self.timestamp: _, text = text.split(' ', 1) # ISO8601 time points have no spaces if self.color and record.levelname in self.COLORS: text = self.COLORS[record.levelname].format(text) return '\n\t'.join(( wrapped_line for line in text.splitlines() for wrapped_line in self.wrapper(line) )) def formatTime(self, record, datefmt=None): """Formats the record time as an ISO date time with correct time zone. Note: This should become redundant in Python 3, because "time.strftime" will handle time zone from "localtime" properly. 
""" return get_time_string_from_unix_time(record.created) class TimestampRotatingFileHandler(logging.FileHandler): """Rotating suite logs using creation time stamps for names. Argument: suite (str): suite name no_detach (bool): non-detach mode? (Default=False) """ FILE_HEADER_FLAG = 'cylc_log_file_header' FILE_NUM = 'cylc_log_num' GLBL_KEY = 'suite logging' MIN_BYTES = 1024 def __init__(self, suite, no_detach=False, timestamp=True): logging.FileHandler.__init__(self, get_suite_run_log_name(suite)) self.no_detach = no_detach self.stamp = None self.formatter = CylcLogFormatter(timestamp=timestamp) self.header_records = [] def emit(self, record): """Emit a record, rollover log if necessary.""" try: if self.should_rollover(record): self.do_rollover() if record.__dict__.get(self.FILE_HEADER_FLAG): self.header_records.append(record) logging.FileHandler.emit(self, record) except (KeyboardInterrupt, SystemExit): raise except Exception: self.handleError(record) def should_rollover(self, record): """Should rollover?""" if self.stamp is None or self.stream is None: return True max_bytes = glbl_cfg().get([self.GLBL_KEY, 'maximum size in bytes']) if max_bytes < self.MIN_BYTES: # No silly value max_bytes = self.MIN_BYTES msg = "%s\n" % self.format(record) try: # due to non-posix-compliant Windows feature self.stream.seek(0, 2) except ValueError as exc: # intended to catch - ValueError: I/O operation on closed file raise SystemExit(exc) return self.stream.tell() + len(msg.encode('utf8')) >= max_bytes def do_rollover(self): """Create and rollover log file if necessary.""" # Generate new file name self.stamp = get_current_time_string(use_basic_format=True) filename = self.baseFilename + '.' + self.stamp os.makedirs(os.path.dirname(filename), exist_ok=True) # Touch file with open(filename, 'w+'): os.utime(filename, None) # Update symlink if (os.path.exists(self.baseFilename) or os.path.lexists(self.baseFilename)): os.unlink(self.baseFilename) os.symlink(os.path.basename(filename), self.baseFilename) # Housekeep log files arch_len = glbl_cfg().get([self.GLBL_KEY, 'rolling archive length']) if arch_len: log_files = glob(self.baseFilename + '.*') log_files.sort() while len(log_files) > arch_len: os.unlink(log_files.pop(0)) # Reopen stream, redirect STDOUT and STDERR to log if self.stream: self.stream.close() self.stream = None self.stream = self._open() # Dup STDOUT and STDERR in detach mode if not self.no_detach: os.dup2(self.stream.fileno(), sys.stdout.fileno()) os.dup2(self.stream.fileno(), sys.stderr.fileno()) # Emit header records (should only do this for subsequent log files) for header_record in self.header_records: if self.FILE_NUM in header_record.__dict__: # Increment log file number header_record.__dict__[self.FILE_NUM] += 1 header_record.args = header_record.args[0:-1] + ( header_record.__dict__[self.FILE_NUM],) logging.FileHandler.emit(self, header_record) class ReferenceLogFileHandler(logging.FileHandler): """A handler class which writes filtered reference logging records to disk files. """ REF_LOG_TEXTS = ( 'triggered off', 'Initial point', 'Start point', 'Final point') """List of texts used for filtering messages.""" def __init__(self, filename): """Create the reference log file handler, specifying the file to write the reference log lines.""" try: os.unlink(filename) except OSError: pass super().__init__(filename) self.formatter = logging.Formatter('%(message)s') self.addFilter(self._filter) def _filter(self, record): """Filter a logging record. 
From the base class Filterer (parent of logging.Handler). Args: record (logging.LogRecord): a log record. Returns: bool: True for message to be logged, False otherwise. """ return any(text in record.getMessage() for text in self.REF_LOG_TEXTS) LOG_LEVEL_REGEXES = [ ( re.compile(r'(^.*%s.*\n((^\t.*\n)+)?)' % level, re.M), replacement.format(r'\1') ) for level, replacement in CylcLogFormatter.COLORS.items() ] def re_formatter(log_string): """Read in an uncoloured log_string file and apply colour formatting.""" for sub, repl in LOG_LEVEL_REGEXES: log_string = sub.sub(repl, log_string) return log_string
gpl-3.0
4,419,167,219,824,265,700
36.466942
79
0.618286
false
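A minimal sketch, not taken from Cylc itself, of attaching the CylcLogFormatter defined above to an ordinary StreamHandler; the logger name is made up. Long or multi-line messages come out with tab-indented continuation lines, as implemented in format():

import logging
import sys

# CylcLogFormatter as defined in cylc/flow/loggingutil.py above.
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(CylcLogFormatter(timestamp=True, color=False, max_width=79))

logger = logging.getLogger('my.suite')   # hypothetical logger name
logger.addHandler(handler)
logger.setLevel(logging.INFO)

logger.warning(
    'a message longer than max_width is wrapped and every continuation '
    'line is indented with a tab, keeping the log easy to parse')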
cherrygirl/micronaet7
crm_quotation/__init__.py
1
1329
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP module # Copyright (C) 2010 Micronaet srl (<http://www.micronaet.it>) and the # Italian OpenERP Community (<http://www.openerp-italia.com>) # # ######################################################################## # OpenERP, Open Source Management Solution # Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved # $Id$ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import quotation import report # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
-8,757,332,320,134,590,000
40.53125
78
0.585403
false
eblade/radiant
radiant/tk.py
1
2650
#!/usr/bin/env python3 try: # Python 3 import tkinter as tk from tkinter import ttk from tkinter import filedialog except ImportError: # Python 2 import Tkinter as tk import ttk import tkFileDialog as filedialog import os class Dialog(tk.Toplevel): def __init__(self, parent, title=None, value=None, focus=None): tk.Toplevel.__init__(self, parent) self.transient(parent) if title: self.title(title) self.parent = parent self.focus_after = focus or parent self.result = None self.cancelled = True self.value = value body = tk.Frame(self) self.initial_focus = self.body(body) body.pack(padx=5, pady=5) self.buttonbox() self.grab_set() if not self.initial_focus: self.initial_focus = self self.protocol("WM_DELETE_WINDOW", self.cancel) self.geometry("+%d+%d" % (parent.winfo_rootx()+50, parent.winfo_rooty()+50)) self.initial_focus.focus_set() self.wait_window(self) # # construction hooks def body(self, master): # create dialog body. return widget that should have # initial focus. this method could be overridden tk.Label(master, text="Value").grid(row=0) self.entry = tk.Entry(master) if self.value: self.entry.insert(0, self.value) self.entry.grid(row=0, column=1) return self.entry def buttonbox(self): # add standard button box. override if you don't want the # standard buttons box = tk.Frame(self) w = tk.Button(box, text="OK", width=10, command=self.ok, default=tk.ACTIVE) w.pack(side=tk.LEFT, padx=5, pady=5) w = tk.Button(box, text="Cancel", width=10, command=self.cancel) w.pack(side=tk.LEFT, padx=5, pady=5) self.bind("<Return>", self.ok) self.bind("<Escape>", self.cancel) box.pack() # # standard button semantics def ok(self, event=None): if not self.validate(): self.initial_focus.focus_set() # put focus back return self.withdraw() self.update_idletasks() self.apply() self.cancelled = False self.cancel() def cancel(self, event=None): # put focus back to the parent window self.focus_after.focus_set() self.destroy() # # command hooks def validate(self): return 1 # override def apply(self): self.value = self.entry.get()
mit
-5,054,887,378,133,606,000
21.457627
83
0.568679
false
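A small usage sketch for the Dialog class above; the surrounding application code is hypothetical and assumes the class is importable from radiant.tk. The default body() shows a single labelled entry, ok() runs validate() and apply(), and the caller reads cancelled and value afterwards:

import tkinter as tk
# Dialog as defined in radiant/tk.py above.

root = tk.Tk()

def rename():
    dlg = Dialog(root, title='Rename', value='old name')
    if not dlg.cancelled:
        print('new value:', dlg.value)   # set by apply() from the entry widget

tk.Button(root, text='Rename...', command=rename).pack(padx=20, pady=20)
root.mainloop()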
LaboratoireMecaniqueLille/crappy
crappy/tool/videoextenso.py
1
12062
# coding: utf-8 from multiprocessing import Process, Pipe import numpy as np from .._global import OptionalModule try: import cv2 except (ModuleNotFoundError, ImportError): cv2 = OptionalModule("opencv-python") try: from skimage.filters import threshold_otsu from skimage.morphology import label from skimage.measure import regionprops except (ModuleNotFoundError, ImportError): label = OptionalModule("skimage", "Please install scikit-image to use" "Video-extenso") threshold_otsu = regionprops = label class LostSpotError(Exception): pass def overlapping(box1, box2): """Returns :obj:`True` if `box1` and `box2` are overlapping or included in each other""" for i in box1[::2]: if box2[0] < i < box2[2]: if not (box1[3] <= box2[1] or box2[3] <= box1[1]): return True for i in box1[1::2]: if box2[1] < i < box2[3]: if not (box1[2] <= box2[0] or box2[2] <= box1[0]): return True # Inclusion for b1, b2 in ((box1, box2), (box2, box1)): if (b1[0] <= b2[0] <= b2[2] <= b1[2]) and \ (b1[1] <= b2[1] <= b2[3] <= b1[3]): return True return False class Video_extenso(object): """The basic VideoExtenso class. It will detect the spots, save the initial position, and return the measured deformation in the most simple way: - It will always return a :obj:`list` of the spots coordinates (in pixel). - It will return `Exx`, `Eyy`, projections of the length of the bounding box of the spot on each axis, divided by its original length. Note: Can detect 2,3 or 4 spots. """ def __init__(self, white_spots=False, update_thresh=False, num_spots="auto", safe_mode=False, border=5, min_area=150, blur=5): """Sets the arguments. Args: white_spots: Set to :obj:`True` if the spots are lighter than the surroundings, else set to :obj:`False`. update_thresh: Should the threshold be updated in each round ? If so there are lower chances to lose the spots but there will be more noise in the measurement. num_spots: The number of spots to detect. Helps for spot detection and allows to force detection of a given number of spots (`"auto"` works fine most of the time). Can be set to: :: "auto", 2, 3, 4 safe_mode: If set to :obj:`False`, it will try hard to catch the spots when losing them. Could result in incoherent values without crash. Set to :obj:`True` when security is a concern. border: The number of pixels that will be added to the limits of the boundingbox. min_area: Filters regions with an area smaller than this value among the selected regions. blur: Median blur to be added to the image to smooth out irregularities and make detection more reliable. """ self.white_spots = white_spots self.update_thresh = update_thresh self.num_spots = num_spots self.safe_mode = safe_mode self.border = border self.min_area = min_area self.blur = blur assert self.num_spots in ['auto', 2, 3, 4], "Invalid number of spots!" self.spot_list = [] self.fallback_mode = False self.consecutive_overlaps = 0 # This number of pixel will be added to the window sending the # spot image to the process def detect_spots(self, img, oy, ox): """Detects the spots in `img`, subframe of the full image. Note: `ox` and `oy` represent the offset of the subframe in the full image. 
""" # Finding out how many spots we should detect # If L0 is already saved, we have already counted the spots, else # see the num_spot parameter # img = rank.median(img, np.ones((15, 15), dtype=img.dtype)) if self.blur and self.blur > 1: img = cv2.medianBlur(img, self.blur) self.thresh = threshold_otsu(img) if self.white_spots: bw = img > self.thresh else: bw = img <= self.thresh # bw = dilation(bw,np.ones((3, 3), dtype=img.dtype)) # bw = erosion(bw,np.ones((3, 3), dtype=img.dtype)) bw = label(bw) # Is it really useful? # bw[0, :] = bw[-1, :] = bw[:, 0] = bw[:, -1] = -1 reg_prop = regionprops(bw) # Remove the regions that are clearly not spots reg_prop = [r for r in reg_prop if r.solidity > .8] # Remove the too small regions (150 is reeally tiny) reg_prop = [r for r in reg_prop if r.area > self.min_area] reg_prop = sorted(reg_prop, key=lambda r: r.area, reverse=True) i = 0 while i < len(reg_prop) - 1: r1 = reg_prop[i] for j in range(i + 1, len(reg_prop)): r2 = reg_prop[j] if overlapping(r1['bbox'], r2['bbox']): print("Overlap") if r1.area > r2.area: del reg_prop[j] else: del reg_prop[i] i -= 1 break i += 1 if self.num_spots == 'auto': # Remove the smallest region until we have a valid number # and all of them are larger than "min_area" pix while len(reg_prop) not in [0, 2, 3, 4]: del reg_prop[-1] if len(reg_prop) == 0: print("Not spots found!") return else: if len(reg_prop) < self.num_spots: print("Found only", len(reg_prop), "spots when expecting", self.num_spots) return reg_prop = reg_prop[:self.num_spots] # Keep the largest ones print("Detected", len(reg_prop), "spots") self.spot_list = [] for r in reg_prop: d = {} y, x = r.centroid d['y'] = oy + y d['x'] = ox + x # l1 = r.major_axis_length # l2 = r.minor_axis_length # s, c = np.sin(r.orientation) ** 2, np.cos(r.orientation) ** 2 # lx = (l1 * c + l2 * s) / 2 # ly = (l1 * s + l2 * c) / 2 # d['bbox'] = d['y'] - ly,d['x'] - lx,d['y'] + ly,d['x'] + lx # d['bbox'] = d['min_col'], d['min_row'], d['max_col'], d['max_row'] # d['bbox'] = tuple([int(i + .5) for i in d['bbox']]) d['bbox'] = tuple([r['bbox'][i] + (oy, ox)[i % 2] for i in range(4)]) self.spot_list.append(d) print(self.spot_list) def save_length(self): if not hasattr(self, "spot_list"): print("You must select the spots first!") return if not hasattr(self, "tracker"): self.start_tracking() y = [s['y'] for s in self.spot_list] x = [s['x'] for s in self.spot_list] self.l0y = max(y) - min(y) self.l0x = max(x) - min(x) self.num_spots = len(self.spot_list) def enlarged_window(self, window, shape): """Returns the slices to get the window around the spot.""" s1 = slice(max(0, window[0] - self.border), min(shape[0], window[2] + self.border)) s2 = slice(max(0, window[1] - self.border), min(shape[1], window[3] + self.border)) return s1, s2 def start_tracking(self): """Will spawn a process per spot, which goal is to track the spot and send the new coordinate after each update.""" self.tracker = [] self.pipe = [] for _ in self.spot_list: i, o = Pipe() self.pipe.append(i) self.tracker.append(Tracker(o, white_spots=self.white_spots, thresh='auto' if self.update_thresh else self.thresh, safe_mode=self.safe_mode, blur=self.blur)) self.tracker[-1].start() def get_def(self, img): """The "heart" of the videoextenso. Will keep track of the spots and return the computed deformation. 
""" if not hasattr(self, "l0x"): print("L0 not saved, saving it now.") self.save_length() for p, s in zip(self.pipe, self.spot_list): win = self.enlarged_window(s['bbox'], img.shape) # print("DEBUG: win is", s['bbox'], "sending", win) p.send(((win[0].start, win[1].start), img[win])) ol = False for p, s in zip(self.pipe, self.spot_list): r = p.recv() if isinstance(r, str): self.stop_tracking() raise LostSpotError("Tracker returned"+r) lst = list(self.spot_list) lst.remove(s) # Please excuse me for the following line, # understand: "if this box overlaps any existing box" if any([overlapping(a_b[0], a_b[1]['bbox']) for a_b in zip([r['bbox']] * len(lst), lst)]): if self.safe_mode: print("Overlapping!") self.stop_tracking() raise LostSpotError("[safe mode] Overlap") print("Overlap! Reducing spot window...") ol = True s['bbox'] = (min(s['bbox'][0] + 1, int(s['y']) - 2), min(s['bbox'][1] + 1, int(s['x']) - 2), max(s['bbox'][2] - 1, int(s['y']) + 2), max(s['bbox'][3] - 1, int(s['x']) + 2)) continue s.update(r) # print("DEBUG updating spot to", s) if ol: self.consecutive_overlaps += 1 if self.consecutive_overlaps >= 10: print("Too many overlaps, I give up!") raise LostSpotError("Multiple overlaps") else: self.consecutive_overlaps = 0 y = [s['y'] for s in self.spot_list] x = [s['x'] for s in self.spot_list] eyy = (max(y) - min(y)) / self.l0y - 1 exx = (max(x) - min(x)) / self.l0x - 1 return [100 * eyy, 100 * exx] def stop_tracking(self): for p in self.pipe: p.send(("", "stop")) class Tracker(Process): """Process tracking a spot for videoextensometry.""" def __init__(self, pipe, white_spots=False, thresh='auto', safe_mode=True, blur=False): Process.__init__(self) self.pipe = pipe self.white_spots = white_spots self.safe_mode = safe_mode self.blur = blur self.fallback_mode = False if thresh == 'auto': self.auto_thresh = True else: self.auto_thresh = False self.thresh = thresh def run(self): # print("DEBUG: process starting, thresh=", self.thresh) while True: try: offset, img = self.pipe.recv() except KeyboardInterrupt: break if type(img) != np.ndarray: break oy, ox = offset try: r = self.evaluate(img) except Exception: raise LostSpotError if not isinstance(r, dict): r = self.fallback(img) if not isinstance(r, dict): raise LostSpotError else: self.fallback_mode = False r['y'] += oy r['x'] += ox miny, minx, maxy, maxx = r['bbox'] # print("DEBUG: bbox=", r['bbox']) r['bbox'] = miny + oy, minx + ox, maxy + oy, maxx + ox # print("DEBUG: new bbox=", r['bbox']) self.pipe.send(r) # print("DEBUG: Process terminating") def evaluate(self, img): if self.blur and self.blur > 1: img = cv2.medianBlur(img, self.blur) if self.auto_thresh: self.thresh = threshold_otsu(img) if self.white_spots: bw = (img > self.thresh).astype(np.uint8) else: bw = (img <= self.thresh).astype(np.uint8) # cv2.imshow(self.name, bw * 255) # cv2.waitKey(5) if not .1 * img.size < np.count_nonzero(bw) < .8 * img.size: print("reevaluating threshold!!") print("Ratio:", np.count_nonzero(bw) / img.size) print("old:", self.thresh) self.thresh = threshold_otsu(img) print("new:", self.thresh) m = cv2.moments(bw) r = {} try: r['x'] = m['m10'] / m['m00'] r['y'] = m['m01'] / m['m00'] except ZeroDivisionError: return -1 x, y, w, h = cv2.boundingRect(bw) # if (h, w) == img.shape: # return -1 r['bbox'] = y, x, y + h, x + w return r def fallback(self, img): """Called when the spots are lost.""" if self.safe_mode or self.fallback_mode: if self.fallback_mode: self.pipe.send("Fallback failed") else: self.pipe.send("[safe mode] Could not compute barycenter") 
self.fallback_mode = False
      return -1
    self.fallback_mode = True
    print("Losing spot! Trying to reevaluate threshold...")
    self.thresh = threshold_otsu(img)
    return self.evaluate(img)
gpl-2.0
-2,730,020,175,331,654,700
32.22865
78
0.577019
false
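A hypothetical driver loop for the Video_extenso class above; grab_frame() stands in for whatever camera code supplies greyscale numpy frames and is not defined here. detect_spots() locates the markers, save_length() stores the reference lengths and spawns one Tracker process per spot, and get_def() returns the two strains in percent:

# Video_extenso and LostSpotError as defined in crappy/tool/videoextenso.py above.
ve = Video_extenso(white_spots=False, num_spots='auto')

first = grab_frame()            # hypothetical image source returning a grey ndarray
ve.detect_spots(first, 0, 0)    # whole frame, so zero offset
ve.save_length()                # stores l0x / l0y and starts one Tracker per spot

try:
    for _ in range(1000):       # arbitrary number of frames
        eyy, exx = ve.get_def(grab_frame())   # strains in percent
        print(eyy, exx)
    ve.stop_tracking()
except LostSpotError:
    print('spots lost, giving up')   # the trackers may already be stopped here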
zstackio/zstack-woodpecker
integrationtest/vm/vpc/test_vpc_dr_overvxlan.py
1
8938
''' @author: Pengtao.Zhang ''' import zstackwoodpecker.test_util as test_util import zstackwoodpecker.test_lib as test_lib import zstackwoodpecker.operations.resource_operations as res_ops import zstackwoodpecker.test_state as test_state import zstackwoodpecker.operations.vm_operations as vm_ops import zstackwoodpecker.operations.host_operations as host_ops import zstackwoodpecker.operations.vpc_operations as vpc_ops import zstackwoodpecker.operations.host_operations as host_ops import random import os import zstackwoodpecker.operations.volume_operations as vol_ops import time import apibinding.api_actions as api_actions import subprocess import zstacklib.utils.ssh as ssh VLAN1_NAME, VLAN2_NAME = ['l3VlanNetworkName1', "l3VlanNetwork2"] VXLAN1_NAME, VXLAN2_NAME = ["l3VxlanNetwork11", "l3VxlanNetwork12"] test_stub = test_lib.lib_get_test_stub() test_obj_dict = test_state.TestStateDict() def test(): hosts_uuid = [] for i in res_ops.get_resource(res_ops.HOST): hosts_uuid.append(i.uuid) test_util.test_dsc("create vpc vrouter and attach vpc l3 to vpc") vr = test_stub.create_vpc_vrouter() test_stub.attach_l3_to_vpc_vr(vr) conf = res_ops.gen_query_conditions('name', '=', 'test_vpc') vr_host_uuid = res_ops.query_resource(res_ops.APPLIANCE_VM, conf)[0].hostUuid vr_uuid = res_ops.query_resource(res_ops.APPLIANCE_VM, conf)[0].uuid vr_bridge = 'vr' + ''.join(list(vr_uuid)[0:6]) hosts_uuid.remove(vr_host_uuid) test_util.test_dsc("enable dr vxlan") vpc_ops.set_vpc_dr_vxlan(vr_uuid, 'enable') vm1 = test_stub.create_vm('test_vm1', 'image_for_sg_test', 'l3VxlanNetwork11', host_uuid = hosts_uuid[0]) vm2 = test_stub.create_vm('test_vm2', 'image_for_sg_test', 'l3VxlanNetwork12', host_uuid = hosts_uuid[1]) vm1_host_ip = test_lib.lib_get_vm_host(vm1.get_vm()).managementIp vm2_host_ip = test_lib.lib_get_vm_host(vm2.get_vm()).managementIp [test_obj_dict.add_vm(vm) for vm in (vm1,vm2)] [vm.check() for vm in (vm1,vm2)] vip = test_stub.create_vip() test_obj_dict.add_vip(vip) eip = test_stub.create_eip(eip_name = 'test eip', vip_uuid = vip.get_vip().uuid, vnic_uuid = vm1.get_vm().vmNics[0].uuid, vm_obj = vm1.get_vm().uuid) eip_ip = eip.get_eip().vipIp vm1_ip = vm1.get_vm().vmNics[0].ip vm2_ip = vm2.get_vm().vmNics[0].ip time.sleep(30) cmd1 = 'sshpass -p password ssh root@%s \"ping -c 100 %s \"' % (eip_ip, vm2_ip) child1 = subprocess.Popen(cmd1,shell=True) time.sleep(5) subprocess.call('sshpass -p password scp /etc/yum.repos.d/zstack-local.repo root@%s:/etc/yum.repos.d' % vm1_host_ip, shell=True) subprocess.call('sshpass -p password scp /etc/yum.repos.d/zstack-local.repo root@%s:/etc/yum.repos.d' % vm2_host_ip, shell=True) subprocess.call('sshpass -p password ssh root@%s yum --disablerepo=* --enablerepo=zstack-local install tcpdump -y' % vm1_host_ip, shell=True) subprocess.call('sshpass -p password ssh root@%s yum --disablerepo=* --enablerepo=zstack-local install tcpdump -y' % vm2_host_ip, shell=True) cmd2 = 'sshpass -p password ssh root@%s \"tcpdump -c 5 -i %s > /tmp/host1_tcpdump.log\"' % (vm1_host_ip, vr_bridge) child2 = subprocess.Popen(cmd2,shell=True) cmd3 = 'sshpass -p password ssh root@%s \"tcpdump -c 5 -i %s > /tmp/host2_tcpdump.log\"' % (vm2_host_ip, vr_bridge) child3 = subprocess.Popen(cmd3,shell=True) time.sleep(10) cmd4 = 'sshpass -p password scp root@%s:/tmp/host1_tcpdump.log /root' % (vm1_host_ip) child4 = subprocess.call(cmd4,shell=True) cmd5 = 'sshpass -p password scp root@%s:/tmp/host2_tcpdump.log /root' % (vm2_host_ip) child5 = subprocess.call(cmd5,shell=True) with open('/root/host1_tcpdump.log' 
,'r') as fd: line = fd.readlines() if vm1_ip in line[0] and vm2_ip in line[0]: with open('/root/host2_tcpdump.log' ,'r') as fdd: linee = fdd.readlines() if vm1_ip in linee[0] and vm2_ip in linee[0]: test_util.test_dsc("check tcpdump from vr bridge pass") os.system('rm -f /root/host1_tcpdump.log /root/host2_tcpdump.log') else: test_util.test_fail('check tcpdump from vr bridge failed') else: test_util.test_fail('check tcpdump from vr bridge failed') subprocess.call('sshpass -p password ssh root@%s \"pkill ping\"' % (eip_ip),shell=True) test_util.test_dsc("reconnect vpc and check again") vr.reconnect() cmd1 = 'sshpass -p password ssh root@%s \"ping -c 100 %s \"' % (eip_ip, vm2_ip) child1 = subprocess.Popen(cmd1,shell=True) time.sleep(5) subprocess.call('sshpass -p password scp /etc/yum.repos.d/zstack-local.repo root@%s:/etc/yum.repos.d' % vm1_host_ip, shell=True) subprocess.call('sshpass -p password scp /etc/yum.repos.d/zstack-local.repo root@%s:/etc/yum.repos.d' % vm2_host_ip, shell=True) subprocess.call('sshpass -p password ssh root@%s yum --disablerepo=* --enablerepo=zstack-local install tcpdump -y' % vm1_host_ip, shell=True) subprocess.call('sshpass -p password ssh root@%s yum --disablerepo=* --enablerepo=zstack-local install tcpdump -y' % vm2_host_ip, shell=True) cmd2 = 'sshpass -p password ssh root@%s \"tcpdump -c 5 -i %s > /tmp/host1_tcpdump.log\"' % (vm1_host_ip, vr_bridge) child2 = subprocess.Popen(cmd2,shell=True) cmd3 = 'sshpass -p password ssh root@%s \"tcpdump -c 5 -i %s > /tmp/host2_tcpdump.log\"' % (vm2_host_ip, vr_bridge) child3 = subprocess.Popen(cmd3,shell=True) time.sleep(10) cmd4 = 'sshpass -p password scp root@%s:/tmp/host1_tcpdump.log /root' % (vm1_host_ip) child4 = subprocess.call(cmd4,shell=True) cmd5 = 'sshpass -p password scp root@%s:/tmp/host2_tcpdump.log /root' % (vm2_host_ip) child5 = subprocess.call(cmd5,shell=True) with open('/root/host1_tcpdump.log' ,'r') as fd: line = fd.readlines() if vm1_ip in line[0] and vm2_ip in line[0]: with open('/root/host2_tcpdump.log' ,'r') as fdd: linee = fdd.readlines() if vm1_ip in linee[0] and vm2_ip in linee[0]: test_util.test_dsc("check tcpdump from vr bridge pass after reconnect vr") os.system('rm -f /root/host1_tcpdump.log /root/host2_tcpdump.log') else: test_util.test_fail('check tcpdump from vr bridge failed after reconnect vr') else: test_util.test_fail('check tcpdump from vr bridge failed after reconnect vr') subprocess.call('sshpass -p password ssh root@%s \"pkill ping\"' % (eip_ip),shell=True) test_util.test_dsc("reboot vpc and check again") vr.reboot() cmd1 = 'sshpass -p password ssh root@%s \"ping -c 100 %s \"' % (eip_ip, vm2_ip) child1 = subprocess.Popen(cmd1,shell=True) time.sleep(5) subprocess.call('sshpass -p password scp /etc/yum.repos.d/zstack-local.repo root@%s:/etc/yum.repos.d' % vm1_host_ip, shell=True) subprocess.call('sshpass -p password scp /etc/yum.repos.d/zstack-local.repo root@%s:/etc/yum.repos.d' % vm2_host_ip, shell=True) subprocess.call('sshpass -p password ssh root@%s yum --disablerepo=* --enablerepo=zstack-local install tcpdump -y' % vm1_host_ip, shell=True) subprocess.call('sshpass -p password ssh root@%s yum --disablerepo=* --enablerepo=zstack-local install tcpdump -y' % vm2_host_ip, shell=True) cmd2 = 'sshpass -p password ssh root@%s \"tcpdump -c 5 -i %s > /tmp/host1_tcpdump.log\"' % (vm1_host_ip, vr_bridge) child2 = subprocess.Popen(cmd2,shell=True) cmd3 = 'sshpass -p password ssh root@%s \"tcpdump -c 5 -i %s > /tmp/host2_tcpdump.log\"' % (vm2_host_ip, vr_bridge) child3 = 
subprocess.Popen(cmd3,shell=True) time.sleep(10) cmd4 = 'sshpass -p password scp root@%s:/tmp/host1_tcpdump.log /root' % (vm1_host_ip) child4 = subprocess.call(cmd4,shell=True) cmd5 = 'sshpass -p password scp root@%s:/tmp/host2_tcpdump.log /root' % (vm2_host_ip) child5 = subprocess.call(cmd5,shell=True) with open('/root/host1_tcpdump.log' ,'r') as fd: line = fd.readlines() if vm1_ip in line[0] and vm2_ip in line[0]: with open('/root/host2_tcpdump.log' ,'r') as fdd: linee = fdd.readlines() if vm1_ip in linee[0] and vm2_ip in linee[0]: test_util.test_dsc("check tcpdump from vr bridge pass") os.system('rm -f /root/host1_tcpdump.log /root/host2_tcpdump.log') test_util.test_dsc("disable dr vxlan") vpc_ops.set_vpc_dr_vxlan(vr_uuid, 'disable') else: test_util.test_fail('check tcpdump from vr bridge failed after reboot vr') else: test_util.test_fail('check tcpdump from vr bridge failed after reboot vr') subprocess.call('sshpass -p password ssh root@%s \"pkill ping\"' % (eip_ip),shell=True) test_util.test_pass('check tcpdump from vr bridge pass') test_lib.lib_error_cleanup(test_obj_dict) test_stub.remove_all_vpc_vrouter() def env_recover(): test_lib.lib_error_cleanup(test_obj_dict) test_stub.remove_all_vpc_vrouter()
apache-2.0
4,042,721,449,108,278,000
52.520958
153
0.674424
false
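The ping / tcpdump / scp verification in test() above is repeated almost verbatim three times (initially, after vr.reconnect() and after vr.reboot()). A sketch of how it could be factored into one helper; the parameter names mirror variables already used in test(), and test_util is the module imported at the top of that file:

import os
import subprocess
import time

def check_vr_bridge_traffic(eip_ip, vm1_ip, vm2_ip, host_ips, vr_bridge, stage=''):
    # Start a background ping through the EIP so traffic crosses the VPC bridge.
    subprocess.Popen('sshpass -p password ssh root@%s "ping -c 100 %s"'
                     % (eip_ip, vm2_ip), shell=True)
    time.sleep(5)
    logs = []
    for n, host_ip in enumerate(host_ips, 1):
        log = '/tmp/host%d_tcpdump.log' % n
        subprocess.Popen('sshpass -p password ssh root@%s "tcpdump -c 5 -i %s > %s"'
                         % (host_ip, vr_bridge, log), shell=True)
        logs.append((host_ip, log))
    time.sleep(10)
    for host_ip, log in logs:
        subprocess.call('sshpass -p password scp root@%s:%s /root' % (host_ip, log),
                        shell=True)
        with open('/root/' + os.path.basename(log)) as fd:
            first_line = fd.readline()
        if vm1_ip not in first_line or vm2_ip not in first_line:
            test_util.test_fail('check tcpdump from vr bridge failed %s' % stage)
    subprocess.call('sshpass -p password ssh root@%s "pkill ping"' % eip_ip,
                    shell=True)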
waynesun09/tp-libvirt
libvirt/tests/src/virsh_cmd/pool/virsh_pool_acl.py
1
11002
import re import os import logging from autotest.client import utils from autotest.client import lv_utils from autotest.client.shared import error from virttest import libvirt_storage from virttest import utils_test from virttest import virsh from virttest.utils_test import libvirt from provider import libvirt_version def run(test, params, env): """ Test the virsh pool commands with acl, initiate a pool then do following operations. (1) Undefine a given type pool (2) Define the pool from xml (3) Build given type pool (4) Start pool (5) Destroy pool (6) Refresh pool after start it (7) Run vol-list with the pool (9) Delete pool For negative cases, redo failed step to make the case run continue. Run cleanup at last restore env. """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") # The file for dumped pool xml pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp") if os.path.dirname(pool_target) is "": pool_target = os.path.join(test.tmpdir, pool_target) vol_name = params.get("vol_name", "temp_vol_1") # Use pool name as VG name vg_name = pool_name vol_path = os.path.join(pool_target, vol_name) define_acl = "yes" == params.get("define_acl", "no") undefine_acl = "yes" == params.get("undefine_acl", "no") start_acl = "yes" == params.get("start_acl", "no") destroy_acl = "yes" == params.get("destroy_acl", "no") build_acl = "yes" == params.get("build_acl", "no") delete_acl = "yes" == params.get("delete_acl", "no") refresh_acl = "yes" == params.get("refresh_acl", "no") vol_list_acl = "yes" == params.get("vol_list_acl", "no") list_dumpxml_acl = "yes" == params.get("list_dumpxml_acl", "no") src_pool_error = "yes" == params.get("src_pool_error", "no") define_error = "yes" == params.get("define_error", "no") undefine_error = "yes" == params.get("undefine_error", "no") start_error = "yes" == params.get("start_error", "no") destroy_error = "yes" == params.get("destroy_error", "no") build_error = "yes" == params.get("build_error", "no") delete_error = "yes" == params.get("delete_error", "no") refresh_error = "yes" == params.get("refresh_error", "no") vol_list_error = "yes" == params.get("vol_list_error", "no") # Clean up flags: # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm # cleanup_env[3] for selinux backup status, cleanup_env[4] for gluster cleanup_env = [False, False, False, "", False] # libvirt acl related params uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = 'testacl' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" " libvirt version.") acl_dargs = {'uri': uri, 'unprivileged_user': unprivileged_user, 'debug': True} def check_pool_list(pool_name, option="--all", expect_error=False): """ Check pool by running pool-list command with given option. 
:param pool_name: Name of the pool :param option: option for pool-list command :param expect_error: Boolean value, expect command success or fail """ found = False # Get the list stored in a variable if list_dumpxml_acl: result = virsh.pool_list(option, **acl_dargs) else: result = virsh.pool_list(option, ignore_status=True) libvirt.check_exit_status(result, False) output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", str(result.stdout)) for item in output: if pool_name in item[0]: found = True break if found: logging.debug("Find pool '%s' in pool list.", pool_name) else: logging.debug("Not find pool %s in pool list.", pool_name) if expect_error and found: raise error.TestFail("Unexpect pool '%s' exist." % pool_name) if not expect_error and not found: raise error.TestFail("Expect pool '%s' doesn't exist." % pool_name) # Run Testcase try: _pool = libvirt_storage.StoragePool() # Init a pool for test result = utils_test.libvirt.define_pool(pool_name, pool_type, pool_target, cleanup_env) libvirt.check_exit_status(result, src_pool_error) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if list_dumpxml_acl: xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml, **acl_dargs) else: xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml) logging.debug("Pool '%s' XML:\n%s", pool_name, xml) # Step (1) # Undefine pool if undefine_acl: result = virsh.pool_undefine(pool_name, **acl_dargs) else: result = virsh.pool_undefine(pool_name, ignore_status=True) libvirt.check_exit_status(result, undefine_error) if undefine_error: check_pool_list(pool_name, "--all", False) # Redo under negative case to keep case continue result = virsh.pool_undefine(pool_name, ignore_status=True) libvirt.check_exit_status(result) check_pool_list(pool_name, "--all", True) else: check_pool_list(pool_name, "--all", True) # Step (2) # Define pool from XML file if define_acl: result = virsh.pool_define(pool_xml, **acl_dargs) else: result = virsh.pool_define(pool_xml) libvirt.check_exit_status(result, define_error) if define_error: # Redo under negative case to keep case continue result = virsh.pool_define(pool_xml) libvirt.check_exit_status(result) # Step (3) # Buid pool, this step may fail for 'disk' and 'logical' types pool if pool_type not in ["disk", "logical"]: option = "" # Options --overwrite and --no-overwrite can only be used to # build a filesystem pool, but it will fail for now # if pool_type == "fs": # option = '--overwrite' if build_acl: result = virsh.pool_build(pool_name, option, **acl_dargs) else: result = virsh.pool_build(pool_name, option, ignore_status=True) libvirt.check_exit_status(result, build_error) if build_error: # Redo under negative case to keep case continue result = virsh.pool_build(pool_name, option, ignore_status=True) libvirt.check_exit_status(result) # Step (4) # Pool start if start_acl: result = virsh.pool_start(pool_name, **acl_dargs) else: result = virsh.pool_start(pool_name, ignore_status=True) libvirt.check_exit_status(result, start_error) if start_error: # Redo under negative case to keep case continue result = virsh.pool_start(pool_name, ignore_status=True) libvirt.check_exit_status(result) option = "--persistent --type %s" % pool_type check_pool_list(pool_name, option) # Step (5) # Pool destroy if destroy_acl: result = virsh.pool_destroy(pool_name, **acl_dargs) else: result = virsh.pool_destroy(pool_name) if result: if destroy_error: raise error.TestFail("Expect fail, but run successfully.") else: if not destroy_error: raise error.TestFail("Pool %s destroy failed, not 
expected." % pool_name) else: # Redo under negative case to keep case continue if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." % pool_name) # Step (6) # Pool refresh for 'dir' type pool # Pool start result = virsh.pool_start(pool_name, ignore_status=True) libvirt.check_exit_status(result) if pool_type == "dir": os.mknod(vol_path) if refresh_acl: result = virsh.pool_refresh(pool_name, **acl_dargs) else: result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(result, refresh_error) # Step (7) # Pool vol-list if vol_list_acl: result = virsh.vol_list(pool_name, **acl_dargs) else: result = virsh.vol_list(pool_name) libvirt.check_exit_status(result, vol_list_error) # Step (8) # Pool delete for 'dir' type pool if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." % pool_name) if pool_type == "dir": if os.path.exists(vol_path): os.remove(vol_path) if delete_acl: result = virsh.pool_delete(pool_name, **acl_dargs) else: result = virsh.pool_delete(pool_name, ignore_status=True) libvirt.check_exit_status(result, delete_error) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if not delete_error: if os.path.exists(pool_target): raise error.TestFail("The target path '%s' still exist." % pool_target) result = virsh.pool_undefine(pool_name, ignore_status=True) libvirt.check_exit_status(result) check_pool_list(pool_name, "--all", True) finally: # Clean up if os.path.exists(pool_xml): os.remove(pool_xml) if not _pool.delete_pool(pool_name): logging.error("Can't delete pool: %s", pool_name) if cleanup_env[2]: cmd = "pvs |grep %s |awk '{print $1}'" % vg_name pv_name = utils.system_output(cmd) lv_utils.vg_remove(vg_name) utils.run("pvremove %s" % pv_name) if cleanup_env[1]: utils_test.libvirt.setup_or_cleanup_iscsi(False) if cleanup_env[0]: utils_test.libvirt.setup_or_cleanup_nfs( False, restore_selinux=cleanup_env[3])
gpl-2.0
5,504,473,698,991,037,000
39.300366
79
0.566624
false
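Every virsh call in run() above repeats the same two-branch pattern: pass **acl_dargs (uri plus unprivileged_user) when the matching *_acl flag is set, otherwise run as root with ignore_status=True. A sketch of expressing that choice once; it reuses names already defined in run() and stays Python 2 compatible like the rest of the file:

def run_virsh(func, acl, *args, **kwargs):
    # acl_dargs carries 'uri', 'unprivileged_user' and 'debug', as built in run().
    if acl:
        return func(*args, **dict(acl_dargs, **kwargs))
    kwargs.setdefault('ignore_status', True)
    return func(*args, **kwargs)

# e.g.
#   result = run_virsh(virsh.pool_start, start_acl, pool_name)
#   libvirt.check_exit_status(result, start_error)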