<SYSTEM_TASK:> encoding of Response.content. <END_TASK> <USER_TASK:> Description: def encoding(self): """ encoding of Response.content. if Response.encoding is None, encoding will be guessed by header or content or chardet if available. """
if hasattr(self, '_encoding'):
    return self._encoding

# content is unicode
if isinstance(self.content, six.text_type):
    return 'unicode'

# Try charset from content-type or content
encoding = get_encoding(self.headers, self.content)

# Fallback to auto-detected encoding.
if not encoding and chardet is not None:
    encoding = chardet.detect(self.content[:600])['encoding']

if encoding and encoding.lower() == 'gb2312':
    encoding = 'gb18030'

self._encoding = encoding or 'utf-8'
return self._encoding
<SYSTEM_TASK:> Returns the json-encoded content of the response, if any. <END_TASK> <USER_TASK:> Description: def json(self): """Returns the json-encoded content of the response, if any."""
if hasattr(self, '_json'):
    return self._json
try:
    self._json = json.loads(self.text or self.content)
except ValueError:
    self._json = None
return self._json
<SYSTEM_TASK:> Returns a PyQuery object of the response's content <END_TASK> <USER_TASK:> Description: def doc(self): """Returns a PyQuery object of the response's content"""
if hasattr(self, '_doc'):
    return self._doc
elements = self.etree
doc = self._doc = PyQuery(elements)
doc.make_links_absolute(utils.text(self.url))
return doc
<SYSTEM_TASK:> Returns a lxml object of the response's content that can be selected by xpath <END_TASK> <USER_TASK:> Description: def etree(self): """Returns a lxml object of the response's content that can be selected by xpath"""
if not hasattr(self, '_elements'):
    try:
        parser = lxml.html.HTMLParser(encoding=self.encoding)
        self._elements = lxml.html.fromstring(self.content, parser=parser)
    except LookupError:
        # lxml would raise LookupError when encoding not supported;
        # try fromstring without encoding instead.
        # on windows, unicode is not available as encoding for lxml
        self._elements = lxml.html.fromstring(self.content)
if isinstance(self._elements, lxml.etree._ElementTree):
    self._elements = self._elements.getroot()
return self._elements
<SYSTEM_TASK:> Do not send process status package back to scheduler. <END_TASK> <USER_TASK:> Description: def not_send_status(func): """ Do not send process status package back to scheduler. It's used by callbacks like on_message, on_result etc... """
@functools.wraps(func)
def wrapper(self, response, task):
    self._extinfo['not_send_status'] = True
    function = func.__get__(self, self.__class__)
    return self._run_func(function, response, task)
return wrapper
<SYSTEM_TASK:> A decorator for setting the default kwargs of `BaseHandler.crawl`. <END_TASK> <USER_TASK:> Description: def config(_config=None, **kwargs): """ A decorator for setting the default kwargs of `BaseHandler.crawl`. Any self.crawl with this callback will use this config. """
if _config is None:
    _config = {}
_config.update(kwargs)

def wrapper(func):
    func._config = _config
    return func
return wrapper
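A minimal usage sketch of the decorator above (hedged; it assumes `config` and `BaseHandler` are importable from `pyspider.libs.base_handler`, as in the pyspider source). Any `self.crawl` that routes to the decorated callback inherits these default crawl kwargs.

from pyspider.libs.base_handler import BaseHandler, config

class Handler(BaseHandler):
    @config(age=10 * 24 * 60 * 60)  # results of this callback stay valid for 10 days
    def index_page(self, response):
        for each in response.doc('a[href^="http"]').items():
            self.crawl(each.attr.href, callback=self.detail_page)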
<SYSTEM_TASK:> method will be called every `minutes` or `seconds` <END_TASK> <USER_TASK:> Description: def every(minutes=NOTSET, seconds=NOTSET): """ method will be called every `minutes` or `seconds` """
def wrapper(func):
    # mark the function with variable 'is_cronjob=True', the function would be
    # collected into the list Handler._cron_jobs by meta class
    func.is_cronjob = True

    # collect interval and unify to seconds, it's used in meta class. See the
    # comments in meta class.
    func.tick = minutes * 60 + seconds
    return func

if inspect.isfunction(minutes):
    func = minutes
    minutes = 1
    seconds = 0
    return wrapper(func)

if minutes is NOTSET:
    if seconds is NOTSET:
        minutes = 1
        seconds = 0
    else:
        minutes = 0
if seconds is NOTSET:
    seconds = 0

return wrapper
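Worked example of the tick arithmetic above (a sketch, not part of the original entry): `@every(minutes=24 * 60)` sets `func.tick = 24 * 60 * 60 + 0 = 86400` seconds, so the cronjob fires once a day, while a bare `@every` falls back to one minute.

from pyspider.libs.base_handler import BaseHandler, every

class Handler(BaseHandler):
    @every(minutes=24 * 60)  # func.tick == 86400 -> run once per day
    def on_start(self):
        self.crawl('http://example.com/', callback=self.index_page)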
<SYSTEM_TASK:> Catch errors of rabbitmq then reconnect <END_TASK> <USER_TASK:> Description: def catch_error(func): """Catch errors of rabbitmq then reconnect"""
import amqp
try:
    import pika.exceptions
    connect_exceptions = (
        pika.exceptions.ConnectionClosed,
        pika.exceptions.AMQPConnectionError,
    )
except ImportError:
    connect_exceptions = ()

connect_exceptions += (
    select.error,
    socket.error,
    amqp.ConnectionError
)

def wrap(self, *args, **kwargs):
    try:
        return func(self, *args, **kwargs)
    except connect_exceptions as e:
        logging.error('RabbitMQ error: %r, reconnect.', e)
        self.reconnect()
        return func(self, *args, **kwargs)
return wrap
<SYSTEM_TASK:> format the log records into a string <END_TASK> <USER_TASK:> Description: def logstr(self): """format the log records into a string"""
result = []
formater = LogFormatter(color=False)
for record in self.logs:
    if isinstance(record, six.string_types):
        result.append(pretty_unicode(record))
    else:
        if record.exc_info:
            a, b, tb = record.exc_info
            tb = hide_me(tb, globals())
            record.exc_info = a, b, tb
        result.append(pretty_unicode(formater.format(record)))
        result.append(u'\n')
return u''.join(result)
<SYSTEM_TASK:> Make sure string is utf8-encoded bytes. <END_TASK> <USER_TASK:> Description: def utf8(string): """ Make sure string is utf8-encoded bytes. If the parameter is an object, object.__str__ will be called before encoding to bytes """
if isinstance(string, six.text_type):
    return string.encode('utf8')
elif isinstance(string, six.binary_type):
    return string
else:
    return six.text_type(string).encode('utf8')
<SYSTEM_TASK:> Make sure string is unicode type, decode with the given encoding if it's not. <END_TASK> <USER_TASK:> Description: def text(string, encoding='utf8'): """ Make sure string is unicode type, decode with the given encoding if it's not. If the parameter is an object, object.__str__ will be called """
if isinstance(string, six.text_type):
    return string
elif isinstance(string, six.binary_type):
    return string.decode(encoding)
else:
    return six.text_type(string)
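A short sketch of how `utf8` and `text` above complement each other (assumes both helpers are importable from `pyspider.libs.utils`, where they live in the pyspider source):

from pyspider.libs.utils import utf8, text

assert utf8(u'中文') == b'\xe4\xb8\xad\xe6\x96\x87'    # unicode -> utf8 bytes
assert text(b'\xe4\xb8\xad\xe6\x96\x87') == u'中文'    # bytes -> unicode
assert text(42) == u'42'                               # non-strings go through str()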
<SYSTEM_TASK:> Make sure string is unicode, try to decode with utf8, or unicode escaped string if failed. <END_TASK> <USER_TASK:> Description: def pretty_unicode(string): """ Make sure string is unicode, try to decode with utf8, or unicode escaped string if failed. """
if isinstance(string, six.text_type):
    return string
try:
    return string.decode("utf8")
except UnicodeDecodeError:
    return string.decode('Latin-1').encode('unicode_escape').decode("utf8")
<SYSTEM_TASK:> Make sure string is unicode; try to decode with utf8, or wrap it in base64 if that fails. <END_TASK> <USER_TASK:> Description: def unicode_string(string): """ Make sure string is unicode; try to decode with utf8, or wrap it in base64 if that fails. The result can be decoded by `decode_unicode_string` """
if isinstance(string, six.text_type):
    return string
try:
    return string.decode("utf8")
except UnicodeDecodeError:
    return '[BASE64-DATA]' + base64.b64encode(string) + '[/BASE64-DATA]'
<SYSTEM_TASK:> Make sure keys and values of the dict are unicode. <END_TASK> <USER_TASK:> Description: def unicode_dict(_dict): """ Make sure keys and values of the dict are unicode. """
r = {}
for k, v in iteritems(_dict):
    r[unicode_obj(k)] = unicode_obj(v)
return r
<SYSTEM_TASK:> Decode string encoded by `unicode_string` <END_TASK> <USER_TASK:> Description: def decode_unicode_string(string): """ Decode string encoded by `unicode_string` """
if string.startswith('[BASE64-DATA]') and string.endswith('[/BASE64-DATA]'):
    return base64.b64decode(string[len('[BASE64-DATA]'):-len('[/BASE64-DATA]')])
return string
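A self-contained sketch of the wrap/unwrap convention that `decode_unicode_string` handles. The wrapped value is built by hand here because the base64 branch of `unicode_string` above concatenates str and bytes and would need a `.decode('ascii')` on Python 3.

import base64

def decode_unicode_string(string):
    # same logic as the helper above
    if string.startswith('[BASE64-DATA]') and string.endswith('[/BASE64-DATA]'):
        return base64.b64decode(string[len('[BASE64-DATA]'):-len('[/BASE64-DATA]')])
    return string

raw = b'\xff\xfe not valid utf8 \xff'
wrapped = '[BASE64-DATA]' + base64.b64encode(raw).decode('ascii') + '[/BASE64-DATA]'
assert decode_unicode_string(wrapped) == raw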
<SYSTEM_TASK:> Return an interactive Python console instance with the caller's stack <END_TASK> <USER_TASK:> Description: def get_python_console(namespace=None): """ Return an interactive Python console instance with the caller's stack """
if namespace is None: import inspect frame = inspect.currentframe() caller = frame.f_back if not caller: logging.error("can't find caller who start this console.") caller = frame namespace = dict(caller.f_globals) namespace.update(caller.f_locals) try: from IPython.terminal.interactiveshell import TerminalInteractiveShell shell = TerminalInteractiveShell(user_ns=namespace) except ImportError: try: import readline import rlcompleter readline.set_completer(rlcompleter.Completer(namespace).complete) readline.parse_and_bind("tab: complete") except ImportError: pass import code shell = code.InteractiveConsole(namespace) shell._quit = False def exit(): shell._quit = True def readfunc(prompt=""): if shell._quit: raise EOFError return six.moves.input(prompt) # inject exit method shell.ask_exit = exit shell.raw_input = readfunc return shell
<SYSTEM_TASK:> Start an interactive Python console with the caller's stack <END_TASK> <USER_TASK:> Description: def python_console(namespace=None): """Start an interactive Python console with the caller's stack"""
if namespace is None:
    import inspect
    frame = inspect.currentframe()
    caller = frame.f_back
    if not caller:
        logging.error("can't find caller who start this console.")
        caller = frame
    namespace = dict(caller.f_globals)
    namespace.update(caller.f_locals)
return get_python_console(namespace=namespace).interact()
<SYSTEM_TASK:> XMLRPC service for windmill browser core to communicate with <END_TASK> <USER_TASK:> Description: def handler(self, environ, start_response): """XMLRPC service for windmill browser core to communicate with"""
if environ['REQUEST_METHOD'] == 'POST':
    return self.handle_POST(environ, start_response)
else:
    start_response("400 Bad request", [('Content-Type', 'text/plain')])
    return ['']
<SYSTEM_TASK:> Check projects' cronjob tick; return True when a new tick is sent <END_TASK> <USER_TASK:> Description: def _check_cronjob(self): """Check projects' cronjob tick; return True when a new tick is sent"""
now = time.time() self._last_tick = int(self._last_tick) if now - self._last_tick < 1: return False self._last_tick += 1 for project in itervalues(self.projects): if not project.active: continue if project.waiting_get_info: continue if int(project.min_tick) == 0: continue if self._last_tick % int(project.min_tick) != 0: continue self.on_select_task({ 'taskid': '_on_cronjob', 'project': project.name, 'url': 'data:,_on_cronjob', 'status': self.taskdb.SUCCESS, 'fetch': { 'save': { 'tick': self._last_tick, }, }, 'process': { 'callback': '_on_cronjob', }, }) return True
<SYSTEM_TASK:> interactive mode for selecting tasks <END_TASK> <USER_TASK:> Description: def _check_select(self): """ interactive mode for selecting tasks """
if not self.interactive: return super(OneScheduler, self)._check_select() # waiting for running tasks if self.running_task > 0: return is_crawled = [] def run(project=None): return crawl('on_start', project=project) def crawl(url, project=None, **kwargs): """ Crawl given url, same parameters as BaseHandler.crawl url - url or taskid, parameters will be used if in taskdb project - can be ignored if only one project exists. """ # looking up the project instance if project is None: if len(self.projects) == 1: project = list(self.projects.keys())[0] else: raise LookupError('You need specify the project: %r' % list(self.projects.keys())) project_data = self.processor.project_manager.get(project) if not project_data: raise LookupError('no such project: %s' % project) # get task package instance = project_data['instance'] instance._reset() task = instance.crawl(url, **kwargs) if isinstance(task, list): raise Exception('url list is not allowed in interactive mode') # check task in taskdb if not kwargs: dbtask = self.taskdb.get_task(task['project'], task['taskid'], fields=self.request_task_fields) if not dbtask: dbtask = self.taskdb.get_task(task['project'], task['url'], fields=self.request_task_fields) if dbtask: task = dbtask # select the task self.on_select_task(task) is_crawled.append(True) shell.ask_exit() def quit_interactive(): '''Quit interactive mode''' is_crawled.append(True) self.interactive = False shell.ask_exit() def quit_pyspider(): '''Close pyspider''' is_crawled[:] = [] shell.ask_exit() shell = utils.get_python_console() banner = ( 'pyspider shell - Select task\n' 'crawl(url, project=None, **kwargs) - same parameters as BaseHandler.crawl\n' 'quit_interactive() - Quit interactive mode\n' 'quit_pyspider() - Close pyspider' ) if hasattr(shell, 'show_banner'): shell.show_banner(banner) shell.interact() else: shell.interact(banner) if not is_crawled: self.ioloop.add_callback(self.ioloop.stop)
<SYSTEM_TASK:> Ignore not processing error in interactive mode <END_TASK> <USER_TASK:> Description: def on_task_status(self, task): """Ignore not processing error in interactive mode"""
if not self.interactive: super(OneScheduler, self).on_task_status(task) try: procesok = task['track']['process']['ok'] except KeyError as e: logger.error("Bad status pack: %s", e) return None if procesok: ret = self.on_task_done(task) else: ret = self.on_task_failed(task) if task['track']['fetch'].get('time'): self._cnt['5m_time'].event((task['project'], 'fetch_time'), task['track']['fetch']['time']) if task['track']['process'].get('time'): self._cnt['5m_time'].event((task['project'], 'process_time'), task['track']['process'].get('time')) self.projects[task['project']].active_tasks.appendleft((time.time(), task)) return ret
<SYSTEM_TASK:> cookie helper for Python 3; use this instead of getheaders <END_TASK> <USER_TASK:> Description: def get_all(self, name, default=None): """cookie helper for Python 3; use this instead of getheaders"""
if default is None:
    default = []
return self._headers.get_list(name) or default
<SYSTEM_TASK:> Explicitly refresh one or more indices, making all operations <END_TASK> <USER_TASK:> Description: def refresh(self): """ Explicitly refresh one or more indices, making all operations performed since the last refresh available for search. """
self._changed = False
self.es.indices.refresh(index=self.index)
<SYSTEM_TASK:> Set value of a counter by counter key <END_TASK> <USER_TASK:> Description: def value(self, key, value=1): """Set value of a counter by counter key"""
if isinstance(key, six.string_types):
    key = (key, )
# assert all(isinstance(k, six.string_types) for k in key)
assert isinstance(key, tuple), "event key type error"
if key not in self.counters:
    self.counters[key] = self.cls()
self.counters[key].value(value)
return self
<SYSTEM_TASK:> Clear unused counters <END_TASK> <USER_TASK:> Description: def trim(self): """Clear unused counters"""
for key, value in list(iteritems(self.counters)):
    if value.empty():
        del self.counters[key]
<SYSTEM_TASK:> Load counters from file <END_TASK> <USER_TASK:> Description: def load(self, filename): """Load counters from file"""
try:
    with open(filename, 'rb') as fp:
        self.counters = cPickle.load(fp)
except:
    logging.debug("can't load counter from file: %s", filename)
    return False
return True
<SYSTEM_TASK:> Run Scheduler, only one scheduler is allowed. <END_TASK> <USER_TASK:> Description: def scheduler(ctx, xmlrpc, xmlrpc_host, xmlrpc_port, inqueue_limit, delete_time, active_tasks, loop_limit, fail_pause_num, scheduler_cls, threads, get_object=False): """ Run Scheduler, only one scheduler is allowed. """
g = ctx.obj Scheduler = load_cls(None, None, scheduler_cls) kwargs = dict(taskdb=g.taskdb, projectdb=g.projectdb, resultdb=g.resultdb, newtask_queue=g.newtask_queue, status_queue=g.status_queue, out_queue=g.scheduler2fetcher, data_path=g.get('data_path', 'data')) if threads: kwargs['threads'] = int(threads) scheduler = Scheduler(**kwargs) scheduler.INQUEUE_LIMIT = inqueue_limit scheduler.DELETE_TIME = delete_time scheduler.ACTIVE_TASKS = active_tasks scheduler.LOOP_LIMIT = loop_limit scheduler.FAIL_PAUSE_NUM = fail_pause_num g.instances.append(scheduler) if g.get('testing_mode') or get_object: return scheduler if xmlrpc: utils.run_in_thread(scheduler.xmlrpc_run, port=xmlrpc_port, bind=xmlrpc_host) scheduler.run()
<SYSTEM_TASK:> Run phantomjs fetcher if phantomjs is installed. <END_TASK> <USER_TASK:> Description: def phantomjs(ctx, phantomjs_path, port, auto_restart, args): """ Run phantomjs fetcher if phantomjs is installed. """
args = args or ctx.default_map and ctx.default_map.get('args', []) import subprocess g = ctx.obj _quit = [] phantomjs_fetcher = os.path.join( os.path.dirname(pyspider.__file__), 'fetcher/phantomjs_fetcher.js') cmd = [phantomjs_path, # this may cause memory leak: https://github.com/ariya/phantomjs/issues/12903 #'--load-images=false', '--ssl-protocol=any', '--disk-cache=true'] + list(args or []) + [phantomjs_fetcher, str(port)] try: _phantomjs = subprocess.Popen(cmd) except OSError: logging.warning('phantomjs not found, continue running without it.') return None def quit(*args, **kwargs): _quit.append(1) _phantomjs.kill() _phantomjs.wait() logging.info('phantomjs exited.') if not g.get('phantomjs_proxy'): g['phantomjs_proxy'] = '127.0.0.1:%s' % port phantomjs = utils.ObjectDict(port=port, quit=quit) g.instances.append(phantomjs) if g.get('testing_mode'): return phantomjs while True: _phantomjs.wait() if _quit or not auto_restart: break _phantomjs = subprocess.Popen(cmd)
<SYSTEM_TASK:> Run puppeteer fetcher if puppeteer is installed. <END_TASK> <USER_TASK:> Description: def puppeteer(ctx, port, auto_restart, args): """ Run puppeteer fetcher if puppeteer is installed. """
import subprocess g = ctx.obj _quit = [] puppeteer_fetcher = os.path.join( os.path.dirname(pyspider.__file__), 'fetcher/puppeteer_fetcher.js') cmd = ['node', puppeteer_fetcher, str(port)] try: _puppeteer = subprocess.Popen(cmd) except OSError: logging.warning('puppeteer not found, continue running without it.') return None def quit(*args, **kwargs): _quit.append(1) _puppeteer.kill() _puppeteer.wait() logging.info('puppeteer exited.') if not g.get('puppeteer_proxy'): g['puppeteer_proxy'] = '127.0.0.1:%s' % port puppeteer = utils.ObjectDict(port=port, quit=quit) g.instances.append(puppeteer) if g.get('testing_mode'): return puppeteer while True: _puppeteer.wait() if _quit or not auto_restart: break _puppeteer = subprocess.Popen(cmd)
<SYSTEM_TASK:> Run all the components in subprocess or thread <END_TASK> <USER_TASK:> Description: def all(ctx, fetcher_num, processor_num, result_worker_num, run_in): """ Run all the components in subprocess or thread """
ctx.obj['debug'] = False g = ctx.obj # FIXME: py34 cannot run components with threads if run_in == 'subprocess' and os.name != 'nt': run_in = utils.run_in_subprocess else: run_in = utils.run_in_thread threads = [] try: # phantomjs if not g.get('phantomjs_proxy'): phantomjs_config = g.config.get('phantomjs', {}) phantomjs_config.setdefault('auto_restart', True) threads.append(run_in(ctx.invoke, phantomjs, **phantomjs_config)) time.sleep(2) if threads[-1].is_alive() and not g.get('phantomjs_proxy'): g['phantomjs_proxy'] = '127.0.0.1:%s' % phantomjs_config.get('port', 25555) # puppeteer if not g.get('puppeteer_proxy'): puppeteer_config = g.config.get('puppeteer', {}) puppeteer_config.setdefault('auto_restart', True) threads.append(run_in(ctx.invoke, puppeteer, **puppeteer_config)) time.sleep(2) if threads[-1].is_alive() and not g.get('puppeteer_proxy'): g['puppeteer_proxy'] = '127.0.0.1:%s' % puppeteer_config.get('port', 22222) # result worker result_worker_config = g.config.get('result_worker', {}) for i in range(result_worker_num): threads.append(run_in(ctx.invoke, result_worker, **result_worker_config)) # processor processor_config = g.config.get('processor', {}) for i in range(processor_num): threads.append(run_in(ctx.invoke, processor, **processor_config)) # fetcher fetcher_config = g.config.get('fetcher', {}) fetcher_config.setdefault('xmlrpc_host', '127.0.0.1') for i in range(fetcher_num): threads.append(run_in(ctx.invoke, fetcher, **fetcher_config)) # scheduler scheduler_config = g.config.get('scheduler', {}) scheduler_config.setdefault('xmlrpc_host', '127.0.0.1') threads.append(run_in(ctx.invoke, scheduler, **scheduler_config)) # running webui in main thread to make it exitable webui_config = g.config.get('webui', {}) webui_config.setdefault('scheduler_rpc', 'http://127.0.0.1:%s/' % g.config.get('scheduler', {}).get('xmlrpc_port', 23333)) ctx.invoke(webui, **webui_config) finally: # exit components run in threading for each in g.instances: each.quit() # exit components run in subprocess for each in threads: if not each.is_alive(): continue if hasattr(each, 'terminate'): each.terminate() each.join()
<SYSTEM_TASK:> One mode not only means all-in-one; it runs everything in one process over <END_TASK> <USER_TASK:> Description: def one(ctx, interactive, enable_phantomjs, enable_puppeteer, scripts): """ One mode not only means all-in-one; it runs everything in one process over tornado.ioloop, for debugging purposes """
ctx.obj['debug'] = False g = ctx.obj g['testing_mode'] = True if scripts: from pyspider.database.local.projectdb import ProjectDB g['projectdb'] = ProjectDB(scripts) if g.get('is_taskdb_default'): g['taskdb'] = connect_database('sqlite+taskdb://') if g.get('is_resultdb_default'): g['resultdb'] = None if enable_phantomjs: phantomjs_config = g.config.get('phantomjs', {}) phantomjs_obj = ctx.invoke(phantomjs, **phantomjs_config) if phantomjs_obj: g.setdefault('phantomjs_proxy', '127.0.0.1:%s' % phantomjs_obj.port) else: phantomjs_obj = None if enable_puppeteer: puppeteer_config = g.config.get('puppeteer', {}) puppeteer_obj = ctx.invoke(puppeteer, **puppeteer_config) if puppeteer_obj: g.setdefault('puppeteer_proxy', '127.0.0.1:%s' % puppeteer.port) else: puppeteer_obj = None result_worker_config = g.config.get('result_worker', {}) if g.resultdb is None: result_worker_config.setdefault('result_cls', 'pyspider.result.OneResultWorker') result_worker_obj = ctx.invoke(result_worker, **result_worker_config) processor_config = g.config.get('processor', {}) processor_config.setdefault('enable_stdout_capture', False) processor_obj = ctx.invoke(processor, **processor_config) fetcher_config = g.config.get('fetcher', {}) fetcher_config.setdefault('xmlrpc', False) fetcher_obj = ctx.invoke(fetcher, **fetcher_config) scheduler_config = g.config.get('scheduler', {}) scheduler_config.setdefault('xmlrpc', False) scheduler_config.setdefault('scheduler_cls', 'pyspider.scheduler.OneScheduler') scheduler_obj = ctx.invoke(scheduler, **scheduler_config) scheduler_obj.init_one(ioloop=fetcher_obj.ioloop, fetcher=fetcher_obj, processor=processor_obj, result_worker=result_worker_obj, interactive=interactive) if scripts: for project in g.projectdb.projects: scheduler_obj.trigger_on_start(project) try: scheduler_obj.run() finally: scheduler_obj.quit() if phantomjs_obj: phantomjs_obj.quit() if puppeteer_obj: puppeteer_obj.quit()
<SYSTEM_TASK:> Send Message to project from command line <END_TASK> <USER_TASK:> Description: def send_message(ctx, scheduler_rpc, project, message): """ Send Message to project from command line """
if isinstance(scheduler_rpc, six.string_types): scheduler_rpc = connect_rpc(ctx, None, scheduler_rpc) if scheduler_rpc is None and os.environ.get('SCHEDULER_NAME'): scheduler_rpc = connect_rpc(ctx, None, 'http://%s/' % ( os.environ['SCHEDULER_PORT_23333_TCP'][len('tcp://'):])) if scheduler_rpc is None: scheduler_rpc = connect_rpc(ctx, None, 'http://127.0.0.1:23333/') return scheduler_rpc.send_task({ 'taskid': utils.md5string('data:,on_message'), 'project': project, 'url': 'data:,on_message', 'fetch': { 'save': ('__command__', message), }, 'process': { 'callback': '_on_message', } })
<SYSTEM_TASK:> Format a Python object into a pretty-printed representation. <END_TASK> <USER_TASK:> Description: def pformat(object, indent=1, width=80, depth=None): """Format a Python object into a pretty-printed representation."""
return PrettyPrinter(indent=indent, width=width, depth=depth).pformat(object)
<SYSTEM_TASK:> Format object for a specific context, returning a string <END_TASK> <USER_TASK:> Description: def format(self, object, context, maxlevels, level): """Format object for a specific context, returning a string and flags indicating whether the representation is 'readable' and whether the object represents a recursive construct. """
return _safe_repr(object, context, maxlevels, level)
<SYSTEM_TASK:> Migration tool for pyspider <END_TASK> <USER_TASK:> Description: def migrate(pool, from_connection, to_connection): """ Migration tool for pyspider """
f = connect_database(from_connection) t = connect_database(to_connection) if isinstance(f, ProjectDB): for each in f.get_all(): each = unicode_obj(each) logging.info("projectdb: %s", each['name']) t.drop(each['name']) t.insert(each['name'], each) elif isinstance(f, TaskDB): pool = Pool(pool) pool.map( lambda x, f=from_connection, t=to_connection: taskdb_migrating(x, f, t), f.projects) elif isinstance(f, ResultDB): pool = Pool(pool) pool.map( lambda x, f=from_connection, t=to_connection: resultdb_migrating(x, f, t), f.projects)
<SYSTEM_TASK:> Adds an accuracy op to the model <END_TASK> <USER_TASK:> Description: def AddAccuracy(model, softmax, label): """Adds an accuracy op to the model"""
accuracy = brew.accuracy(model, [softmax, label], "accuracy")
return accuracy
<SYSTEM_TASK:> Adds training operators to the model. <END_TASK> <USER_TASK:> Description: def AddTrainingOperators(model, softmax, label): """Adds training operators to the model."""
xent = model.LabelCrossEntropy([softmax, label], 'xent') # compute the expected loss loss = model.AveragedLoss(xent, "loss") # track the accuracy of the model AddAccuracy(model, softmax, label) # use the average loss we just computed to add gradient operators to the # model model.AddGradientOperators([loss]) # do a simple stochastic gradient descent ITER = brew.iter(model, "iter") # set the learning rate schedule LR = model.LearningRate( ITER, "LR", base_lr=-0.1, policy="step", stepsize=1, gamma=0.999) # ONE is a constant value that is used in the gradient update. We only need # to create it once, so it is explicitly placed in param_init_net. ONE = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0) # Now, for each parameter, we do the gradient updates. for param in model.params: # Note how we get the gradient of each parameter - ModelHelper keeps # track of that. param_grad = model.param_to_grad[param] # The update is a simple weighted sum: param = param + param_grad * LR model.WeightedSum([param, ONE, param_grad, LR], param)
<SYSTEM_TASK:> This adds a few bookkeeping operators that we can inspect later. <END_TASK> <USER_TASK:> Description: def AddBookkeepingOperators(model): """This adds a few bookkeeping operators that we can inspect later. These operators do not affect the training procedure: they only collect statistics and prints them to file or to logs. """
# Print basically prints out the content of the blob. to_file=1 routes the
# printed output to a file. The file is going to be stored under
# root_folder/[blob name]
model.Print('accuracy', [], to_file=1)
model.Print('loss', [], to_file=1)
# Summarizes the parameters. Different from Print, Summarize gives some
# statistics of the parameter, such as mean, std, min and max.
for param in model.params:
    model.Summarize(param, [], to_file=1)
    model.Summarize(model.param_to_grad[param], [], to_file=1)
<SYSTEM_TASK:> Get loss function of VAE. <END_TASK> <USER_TASK:> Description: def get_loss_func(self, C=1.0, k=1): """Get loss function of VAE. The loss value is equal to ELBO (Evidence Lower Bound) multiplied by -1. Args: C (int): Usually this is 1.0. Can be changed to control the second term of ELBO bound, which works as regularization. k (int): Number of Monte Carlo samples used in encoded vector. """
def lf(x):
    mu, ln_var = self.encode(x)
    batchsize = len(mu.data)
    # reconstruction loss
    rec_loss = 0
    for l in six.moves.range(k):
        z = F.gaussian(mu, ln_var)
        rec_loss += F.bernoulli_nll(x, self.decode(z, sigmoid=False)) \
            / (k * batchsize)
    self.rec_loss = rec_loss
    self.loss = self.rec_loss + \
        C * gaussian_kl_divergence(mu, ln_var) / batchsize
    return self.loss
return lf
<SYSTEM_TASK:> Processes an entire step by applying the processor to the observation, reward, and info arguments. <END_TASK> <USER_TASK:> Description: def process_step(self, observation, reward, done, info): """Processes an entire step by applying the processor to the observation, reward, and info arguments. # Arguments observation (object): An observation as obtained by the environment. reward (float): A reward as obtained by the environment. done (boolean): `True` if the environment is in a terminal state, `False` otherwise. info (dict): The debug info dictionary as obtained by the environment. # Returns The tuple (observation, reward, done, info) with all elements processed. """
observation = self.process_observation(observation)
reward = self.process_reward(reward)
info = self.process_info(info)
return observation, reward, done, info
<SYSTEM_TASK:> Return current annealing value <END_TASK> <USER_TASK:> Description: def get_current_value(self): """Return current annealing value # Returns Value to use in annealing """
if self.agent.training:
    # Linear annealed: f(x) = ax + b.
    a = -float(self.value_max - self.value_min) / float(self.nb_steps)
    b = float(self.value_max)
    value = max(self.value_min, a * float(self.agent.step) + b)
else:
    value = self.value_test
return value
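Worked numbers for the linear schedule above (self-contained; it mirrors the formula in the snippet): with value_max=1.0, value_min=0.1 and nb_steps=10000, the slope is a = -(1.0 - 0.1) / 10000 = -9e-05 and b = 1.0, so the value decays from 1.0 to 0.1 and is clipped at 0.1 afterwards.

value_max, value_min, nb_steps = 1.0, 0.1, 10000
a = -float(value_max - value_min) / float(nb_steps)
b = float(value_max)
for step in (0, 5000, 10000, 20000):
    print(step, max(value_min, a * step + b))  # 1.0, 0.55, 0.1, 0.1 (clipped)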
<SYSTEM_TASK:> Choose an action to perform <END_TASK> <USER_TASK:> Description: def select_action(self, **kwargs): """Choose an action to perform # Returns Action to take (int) """
setattr(self.inner_policy, self.attr, self.get_current_value())
return self.inner_policy.select_action(**kwargs)
<SYSTEM_TASK:> Return configurations of BoltzmannGumbelQPolicy <END_TASK> <USER_TASK:> Description: def get_config(self): """Return configurations of BoltzmannGumbelQPolicy # Returns Dict of config """
config = super(BoltzmannGumbelQPolicy, self).get_config()
config['C'] = self.C
return config
<SYSTEM_TASK:> Set environment for each callback in callbackList <END_TASK> <USER_TASK:> Description: def _set_env(self, env): """ Set environment for each callback in callbackList """
for callback in self.callbacks:
    if callable(getattr(callback, '_set_env', None)):
        callback._set_env(env)
<SYSTEM_TASK:> Called at beginning of each episode for each callback in callbackList <END_TASK> <USER_TASK:> Description: def on_episode_begin(self, episode, logs={}): """ Called at beginning of each episode for each callback in callbackList"""
for callback in self.callbacks:
    # Check if callback supports the more appropriate `on_episode_begin` callback.
    # If not, fall back to `on_epoch_begin` to be compatible with built-in Keras callbacks.
    if callable(getattr(callback, 'on_episode_begin', None)):
        callback.on_episode_begin(episode, logs=logs)
    else:
        callback.on_epoch_begin(episode, logs=logs)
<SYSTEM_TASK:> Called at end of each episode for each callback in callbackList <END_TASK> <USER_TASK:> Description: def on_episode_end(self, episode, logs={}): """ Called at end of each episode for each callback in callbackList"""
for callback in self.callbacks:
    # Check if callback supports the more appropriate `on_episode_end` callback.
    # If not, fall back to `on_epoch_end` to be compatible with built-in Keras callbacks.
    if callable(getattr(callback, 'on_episode_end', None)):
        callback.on_episode_end(episode, logs=logs)
    else:
        callback.on_epoch_end(episode, logs=logs)
<SYSTEM_TASK:> Called at beginning of each step for each callback in callbackList <END_TASK> <USER_TASK:> Description: def on_step_begin(self, step, logs={}): """ Called at beginning of each step for each callback in callbackList"""
for callback in self.callbacks:
    # Check if callback supports the more appropriate `on_step_begin` callback.
    # If not, fall back to `on_batch_begin` to be compatible with built-in Keras callbacks.
    if callable(getattr(callback, 'on_step_begin', None)):
        callback.on_step_begin(step, logs=logs)
    else:
        callback.on_batch_begin(step, logs=logs)
<SYSTEM_TASK:> Called at end of each step for each callback in callbackList <END_TASK> <USER_TASK:> Description: def on_step_end(self, step, logs={}): """ Called at end of each step for each callback in callbackList"""
for callback in self.callbacks:
    # Check if callback supports the more appropriate `on_step_end` callback.
    # If not, fall back to `on_batch_end` to be compatible with built-in Keras callbacks.
    if callable(getattr(callback, 'on_step_end', None)):
        callback.on_step_end(step, logs=logs)
    else:
        callback.on_batch_end(step, logs=logs)
<SYSTEM_TASK:> Called at beginning of each action for each callback in callbackList <END_TASK> <USER_TASK:> Description: def on_action_begin(self, action, logs={}): """ Called at beginning of each action for each callback in callbackList"""
for callback in self.callbacks:
    if callable(getattr(callback, 'on_action_begin', None)):
        callback.on_action_begin(action, logs=logs)
<SYSTEM_TASK:> Called at end of each action for each callback in callbackList <END_TASK> <USER_TASK:> Description: def on_action_end(self, action, logs={}): """ Called at end of each action for each callback in callbackList"""
for callback in self.callbacks:
    if callable(getattr(callback, 'on_action_end', None)):
        callback.on_action_end(action, logs=logs)
<SYSTEM_TASK:> Print training values at beginning of training <END_TASK> <USER_TASK:> Description: def on_train_begin(self, logs): """ Print training values at beginning of training """
self.train_start = timeit.default_timer()
self.metrics_names = self.model.metrics_names
print('Training for {} steps ...'.format(self.params['nb_steps']))
<SYSTEM_TASK:> Print training time at end of training <END_TASK> <USER_TASK:> Description: def on_train_end(self, logs): """ Print training time at end of training """
duration = timeit.default_timer() - self.train_start
print('done, took {:.3f} seconds'.format(duration))
<SYSTEM_TASK:> Reset environment variables at beginning of each episode <END_TASK> <USER_TASK:> Description: def on_episode_begin(self, episode, logs): """ Reset environment variables at beginning of each episode """
self.episode_start[episode] = timeit.default_timer()
self.observations[episode] = []
self.rewards[episode] = []
self.actions[episode] = []
self.metrics[episode] = []
<SYSTEM_TASK:> Compute and print training statistics of the episode when done <END_TASK> <USER_TASK:> Description: def on_episode_end(self, episode, logs): """ Compute and print training statistics of the episode when done """
duration = timeit.default_timer() - self.episode_start[episode] episode_steps = len(self.observations[episode]) # Format all metrics. metrics = np.array(self.metrics[episode]) metrics_template = '' metrics_variables = [] with warnings.catch_warnings(): warnings.filterwarnings('error') for idx, name in enumerate(self.metrics_names): if idx > 0: metrics_template += ', ' try: value = np.nanmean(metrics[:, idx]) metrics_template += '{}: {:f}' except Warning: value = '--' metrics_template += '{}: {}' metrics_variables += [name, value] metrics_text = metrics_template.format(*metrics_variables) nb_step_digits = str(int(np.ceil(np.log10(self.params['nb_steps']))) + 1) template = '{step: ' + nb_step_digits + 'd}/{nb_steps}: episode: {episode}, duration: {duration:.3f}s, episode steps: {episode_steps}, steps per second: {sps:.0f}, episode reward: {episode_reward:.3f}, mean reward: {reward_mean:.3f} [{reward_min:.3f}, {reward_max:.3f}], mean action: {action_mean:.3f} [{action_min:.3f}, {action_max:.3f}], mean observation: {obs_mean:.3f} [{obs_min:.3f}, {obs_max:.3f}], {metrics}' variables = { 'step': self.step, 'nb_steps': self.params['nb_steps'], 'episode': episode + 1, 'duration': duration, 'episode_steps': episode_steps, 'sps': float(episode_steps) / duration, 'episode_reward': np.sum(self.rewards[episode]), 'reward_mean': np.mean(self.rewards[episode]), 'reward_min': np.min(self.rewards[episode]), 'reward_max': np.max(self.rewards[episode]), 'action_mean': np.mean(self.actions[episode]), 'action_min': np.min(self.actions[episode]), 'action_max': np.max(self.actions[episode]), 'obs_mean': np.mean(self.observations[episode]), 'obs_min': np.min(self.observations[episode]), 'obs_max': np.max(self.observations[episode]), 'metrics': metrics_text, } print(template.format(**variables)) # Free up resources. del self.episode_start[episode] del self.observations[episode] del self.rewards[episode] del self.actions[episode] del self.metrics[episode]
<SYSTEM_TASK:> Update statistics of episode after each step <END_TASK> <USER_TASK:> Description: def on_step_end(self, step, logs): """ Update statistics of episode after each step """
episode = logs['episode']
self.observations[episode].append(logs['observation'])
self.rewards[episode].append(logs['reward'])
self.actions[episode].append(logs['action'])
self.metrics[episode].append(logs['metrics'])
self.step += 1
<SYSTEM_TASK:> Print metrics if interval is over <END_TASK> <USER_TASK:> Description: def on_step_begin(self, step, logs): """ Print metrics if interval is over """
if self.step % self.interval == 0: if len(self.episode_rewards) > 0: metrics = np.array(self.metrics) assert metrics.shape == (self.interval, len(self.metrics_names)) formatted_metrics = '' if not np.isnan(metrics).all(): # not all values are means means = np.nanmean(self.metrics, axis=0) assert means.shape == (len(self.metrics_names),) for name, mean in zip(self.metrics_names, means): formatted_metrics += ' - {}: {:.3f}'.format(name, mean) formatted_infos = '' if len(self.infos) > 0: infos = np.array(self.infos) if not np.isnan(infos).all(): # not all values are means means = np.nanmean(self.infos, axis=0) assert means.shape == (len(self.info_names),) for name, mean in zip(self.info_names, means): formatted_infos += ' - {}: {:.3f}'.format(name, mean) print('{} episodes - episode_reward: {:.3f} [{:.3f}, {:.3f}]{}{}'.format(len(self.episode_rewards), np.mean(self.episode_rewards), np.min(self.episode_rewards), np.max(self.episode_rewards), formatted_metrics, formatted_infos)) print('') self.reset() print('Interval {} ({} steps performed)'.format(self.step // self.interval + 1, self.step))
<SYSTEM_TASK:> Update the progress bar at the end of each step <END_TASK> <USER_TASK:> Description: def on_step_end(self, step, logs): """ Update the progress bar at the end of each step """
if self.info_names is None:
    self.info_names = logs['info'].keys()
values = [('reward', logs['reward'])]
if KERAS_VERSION > '2.1.3':
    self.progbar.update((self.step % self.interval) + 1, values=values)
else:
    self.progbar.update((self.step % self.interval) + 1, values=values, force=True)
self.step += 1
self.metrics.append(logs['metrics'])
if len(self.info_names) > 0:
    self.infos.append([logs['info'][k] for k in self.info_names])
<SYSTEM_TASK:> Initialize metrics at the beginning of each episode <END_TASK> <USER_TASK:> Description: def on_episode_begin(self, episode, logs): """ Initialize metrics at the beginning of each episode """
assert episode not in self.metrics
assert episode not in self.starts
self.metrics[episode] = []
self.starts[episode] = timeit.default_timer()
<SYSTEM_TASK:> Compute and print metrics at the end of each episode <END_TASK> <USER_TASK:> Description: def on_episode_end(self, episode, logs): """ Compute and print metrics at the end of each episode """
duration = timeit.default_timer() - self.starts[episode] metrics = self.metrics[episode] if np.isnan(metrics).all(): mean_metrics = np.array([np.nan for _ in self.metrics_names]) else: mean_metrics = np.nanmean(metrics, axis=0) assert len(mean_metrics) == len(self.metrics_names) data = list(zip(self.metrics_names, mean_metrics)) data += list(logs.items()) data += [('episode', episode), ('duration', duration)] for key, value in data: if key not in self.data: self.data[key] = [] self.data[key].append(value) if self.interval is not None and episode % self.interval == 0: self.save_data() # Clean up. del self.metrics[episode] del self.starts[episode]
<SYSTEM_TASK:> Save weights at interval steps during training <END_TASK> <USER_TASK:> Description: def on_step_end(self, step, logs={}): """ Save weights at interval steps during training """
self.total_steps += 1
if self.total_steps % self.interval != 0:
    # Nothing to do.
    return

filepath = self.filepath.format(step=self.total_steps, **logs)
if self.verbose > 0:
    print('Step {}: saving model to {}'.format(self.total_steps, filepath))
self.model.save_weights(filepath, overwrite=True)
<SYSTEM_TASK:> Return an array of zeros with same shape as given observation <END_TASK> <USER_TASK:> Description: def zeroed_observation(observation): """Return an array of zeros with same shape as given observation # Argument observation (list): List of observation # Return A np.ndarray of zeros with observation.shape """
if hasattr(observation, 'shape'):
    return np.zeros(observation.shape)
elif hasattr(observation, '__iter__'):
    out = []
    for x in observation:
        out.append(zeroed_observation(x))
    return out
else:
    return 0.
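Quick self-contained check of the recursion above (a condensed copy of the helper so the snippet runs on its own):

import numpy as np

def zeroed_observation(observation):
    if hasattr(observation, 'shape'):
        return np.zeros(observation.shape)
    elif hasattr(observation, '__iter__'):
        return [zeroed_observation(x) for x in observation]
    return 0.

obs = [np.ones((2, 3)), [np.ones(4), 7]]
zeros = zeroed_observation(obs)
print(zeros[0].shape, zeros[1][0].shape, zeros[1][1])  # (2, 3) (4,) 0.0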
<SYSTEM_TASK:> Return list of last observations <END_TASK> <USER_TASK:> Description: def get_recent_state(self, current_observation): """Return list of last observations # Argument current_observation (object): Last observation # Returns A list of the last observations """
# This code is slightly complicated by the fact that subsequent observations might be # from different episodes. We ensure that an experience never spans multiple episodes. # This is probably not that important in practice but it seems cleaner. state = [current_observation] idx = len(self.recent_observations) - 1 for offset in range(0, self.window_length - 1): current_idx = idx - offset current_terminal = self.recent_terminals[current_idx - 1] if current_idx - 1 >= 0 else False if current_idx < 0 or (not self.ignore_episode_boundaries and current_terminal): # The previously handled observation was terminal, don't add the current one. # Otherwise we would leak into a different episode. break state.insert(0, self.recent_observations[current_idx]) while len(state) < self.window_length: state.insert(0, zeroed_observation(state[0])) return state
<SYSTEM_TASK:> Return a randomized batch of experiences <END_TASK> <USER_TASK:> Description: def sample(self, batch_size, batch_idxs=None): """Return a randomized batch of experiences # Argument batch_size (int): Size of the all batch batch_idxs (int): Indexes to extract # Returns A list of experiences randomly selected """
# It is not possible to tell whether the first state in the memory is terminal, because it # would require access to the "terminal" flag associated to the previous state. As a result # we will never return this first state (only using `self.terminals[0]` to know whether the # second state is terminal). # In addition we need enough entries to fill the desired window length. assert self.nb_entries >= self.window_length + 2, 'not enough entries in the memory' if batch_idxs is None: # Draw random indexes such that we have enough entries before each index to fill the # desired window length. batch_idxs = sample_batch_indexes( self.window_length, self.nb_entries - 1, size=batch_size) batch_idxs = np.array(batch_idxs) + 1 assert np.min(batch_idxs) >= self.window_length + 1 assert np.max(batch_idxs) < self.nb_entries assert len(batch_idxs) == batch_size # Create experiences experiences = [] for idx in batch_idxs: terminal0 = self.terminals[idx - 2] while terminal0: # Skip this transition because the environment was reset here. Select a new, random # transition and use this instead. This may cause the batch to contain the same # transition twice. idx = sample_batch_indexes(self.window_length + 1, self.nb_entries, size=1)[0] terminal0 = self.terminals[idx - 2] assert self.window_length + 1 <= idx < self.nb_entries # This code is slightly complicated by the fact that subsequent observations might be # from different episodes. We ensure that an experience never spans multiple episodes. # This is probably not that important in practice but it seems cleaner. state0 = [self.observations[idx - 1]] for offset in range(0, self.window_length - 1): current_idx = idx - 2 - offset assert current_idx >= 1 current_terminal = self.terminals[current_idx - 1] if current_terminal and not self.ignore_episode_boundaries: # The previously handled observation was terminal, don't add the current one. # Otherwise we would leak into a different episode. break state0.insert(0, self.observations[current_idx]) while len(state0) < self.window_length: state0.insert(0, zeroed_observation(state0[0])) action = self.actions[idx - 1] reward = self.rewards[idx - 1] terminal1 = self.terminals[idx - 1] # Okay, now we need to create the follow-up state. This is state0 shifted on timestep # to the right. Again, we need to be careful to not include an observation from the next # episode if the last state is terminal. state1 = [np.copy(x) for x in state0[1:]] state1.append(self.observations[idx]) assert len(state0) == self.window_length assert len(state1) == len(state0) experiences.append(Experience(state0=state0, action=action, reward=reward, state1=state1, terminal1=terminal1)) assert len(experiences) == batch_size return experiences
<SYSTEM_TASK:> Append an observation to the memory <END_TASK> <USER_TASK:> Description: def append(self, observation, action, reward, terminal, training=True): """Append an observation to the memory # Argument observation (dict): Observation returned by environment action (int): Action taken to obtain this observation reward (float): Reward obtained by taking this action terminal (boolean): Is the state terminal """
super(SequentialMemory, self).append(observation, action, reward, terminal, training=training)

# This needs to be understood as follows: in `observation`, take `action`, obtain `reward`
# and whether the next state is `terminal` or not.
if training:
    self.observations.append(observation)
    self.actions.append(action)
    self.rewards.append(reward)
    self.terminals.append(terminal)
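Typical construction in keras-rl (a hedged sketch; the constructor arguments shown are the commonly documented ones):

from rl.memory import SequentialMemory

memory = SequentialMemory(limit=50000, window_length=1)
# During training the agent calls memory.append(obs, action, reward, terminal)
# after each environment step, and memory.sample(batch_size=32) to draw
# Experience tuples for the replay update.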
<SYSTEM_TASK:> Return a randomized batch of params and rewards <END_TASK> <USER_TASK:> Description: def sample(self, batch_size, batch_idxs=None): """Return a randomized batch of params and rewards # Argument batch_size (int): Size of the all batch batch_idxs (int): Indexes to extract # Returns A list of params randomly selected and a list of associated rewards """
if batch_idxs is None:
    batch_idxs = sample_batch_indexes(0, self.nb_entries, size=batch_size)
assert len(batch_idxs) == batch_size

batch_params = []
batch_total_rewards = []
for idx in batch_idxs:
    batch_params.append(self.params[idx])
    batch_total_rewards.append(self.total_rewards[idx])
return batch_params, batch_total_rewards
<SYSTEM_TASK:> Append a reward to the memory <END_TASK> <USER_TASK:> Description: def append(self, observation, action, reward, terminal, training=True): """Append a reward to the memory # Argument observation (dict): Observation returned by environment action (int): Action taken to obtain this observation reward (float): Reward obtained by taking this action terminal (boolean): Is the state terminal """
super(EpisodeParameterMemory, self).append(observation, action, reward, terminal, training=training)
if training:
    self.intermediate_rewards.append(reward)
<SYSTEM_TASK:> Closes the current episode, sums up rewards and stores the parameters <END_TASK> <USER_TASK:> Description: def finalize_episode(self, params): """Closes the current episode, sums up rewards and stores the parameters # Argument params (object): Parameters associated with the episode to be stored and then retrieved back in sample() """
total_reward = sum(self.intermediate_rewards)
self.total_rewards.append(total_reward)
self.params.append(params)
self.intermediate_rewards = []
<SYSTEM_TASK:> Create a wrapped, SubprocVecEnv for Gym Environments. <END_TASK> <USER_TASK:> Description: def make_gym_env(env_id, num_env=2, seed=123, wrapper_kwargs=None, start_index=0): """ Create a wrapped, SubprocVecEnv for Gym Environments. """
if wrapper_kwargs is None:
    wrapper_kwargs = {}

def make_env(rank):  # pylint: disable=C0111
    def _thunk():
        env = gym.make(env_id)
        env.seed(seed + rank)
        return env
    return _thunk

set_global_seeds(seed)
return SubprocVecEnv([make_env(i + start_index) for i in range(num_env)])
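Hypothetical call of the factory above (assumes gym and a baselines-style SubprocVecEnv are installed; the environment id is only an example):

envs = make_gym_env('CartPole-v1', num_env=4, seed=0)
obs = envs.reset()                                   # stacked observations, one row per env
actions = [envs.action_space.sample() for _ in range(4)]
obs, rewards, dones, infos = envs.step(actions)
envs.close()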
<SYSTEM_TASK:> Common CLI options shared by "local invoke" and "local start-api" commands <END_TASK> <USER_TASK:> Description: def invoke_common_options(f): """ Common CLI options shared by "local invoke" and "local start-api" commands :param f: Callback passed by Click """
invoke_options = [ template_click_option(), click.option('--env-vars', '-n', type=click.Path(exists=True), help="JSON file containing values for Lambda function's environment variables."), parameter_override_click_option(), click.option('--debug-port', '-d', help="When specified, Lambda function container will start in debug mode and will expose this " "port on localhost.", envvar="SAM_DEBUG_PORT"), click.option('--debugger-path', help="Host path to a debugger that will be mounted into the Lambda container."), click.option('--debug-args', help="Additional arguments to be passed to the debugger.", envvar="DEBUGGER_ARGS"), click.option('--docker-volume-basedir', '-v', envvar="SAM_DOCKER_VOLUME_BASEDIR", help="Specifies the location basedir where the SAM file exists. If the Docker is running on " "a remote machine, you must mount the path where the SAM file exists on the docker machine " "and modify this value to match the remote machine."), click.option('--log-file', '-l', help="logfile to send runtime logs to."), click.option('--layer-cache-basedir', type=click.Path(exists=False, file_okay=False), envvar="SAM_LAYER_CACHE_BASEDIR", help="Specifies the location basedir where the Layers your template uses will be downloaded to.", default=get_default_layer_cache_dir()), ] + docker_click_options() + [ click.option('--force-image-build', is_flag=True, help='Specify whether CLI should rebuild the image used for invoking functions with layers.', envvar='SAM_FORCE_IMAGE_BUILD', default=False), ] # Reverse the list to maintain ordering of options in help text printed with --help for option in reversed(invoke_options): option(f) return f
<SYSTEM_TASK:> Context Manager that creates the tarball of the Docker Context to use for building the image <END_TASK> <USER_TASK:> Description: def create_tarball(tar_paths): """ Context Manager that creates the tarball of the Docker Context to use for building the image Parameters ---------- tar_paths dict(str, str) Key representing a full path to the file or directory and the Value representing the path within the tarball Yields ------ The tarball file """
tarballfile = TemporaryFile()

with tarfile.open(fileobj=tarballfile, mode='w') as archive:
    for path_on_system, path_in_tarball in tar_paths.items():
        archive.add(path_on_system, arcname=path_in_tarball)

# Flush and seek to the beginning of the file
tarballfile.flush()
tarballfile.seek(0)

try:
    yield tarballfile
finally:
    tarballfile.close()
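A hedged usage sketch with docker-py (building from a context tarball via fileobj + custom_context; the paths and tag are made up for illustration, and `create_tarball` is assumed importable as defined above):

import docker

client = docker.from_env()
paths = {
    '/home/user/app/Dockerfile': 'Dockerfile',
    '/home/user/app/src': 'src',
}
with create_tarball(paths) as tarball:
    image, logs = client.images.build(fileobj=tarball, custom_context=True,
                                      tag='local/lambda:latest')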
<SYSTEM_TASK:> Creates and starts the Local Lambda Invoke service. This method will block until the service is stopped <END_TASK> <USER_TASK:> Description: def start(self): """ Creates and starts the Local Lambda Invoke service. This method will block until the service is stopped manually using an interrupt. After the service is started, callers can make HTTP requests to the endpoint to invoke the Lambda function and receive a response. NOTE: This is a blocking call that will not return until the thread is interrupted with SIGINT/SIGTERM """
# We care about passing only stderr to the Service and not stdout because stdout from Docker container # contains the response to the API which is sent out as HTTP response. Only stderr needs to be printed # to the console or a log file. stderr from Docker container contains runtime logs and output of print # statements from the Lambda function service = LocalLambdaInvokeService(lambda_runner=self.lambda_runner, port=self.port, host=self.host, stderr=self.stderr_stream) service.create() LOG.info("Starting the Local Lambda Service. You can now invoke your Lambda Functions defined in your template" " through the endpoint.") service.run()
<SYSTEM_TASK:> Extracts the SAM Function CodeUri from the Resource Properties <END_TASK> <USER_TASK:> Description: def _extract_sam_function_codeuri(name, resource_properties, code_property_key): """ Extracts the SAM Function CodeUri from the Resource Properties Parameters ---------- name str LogicalId of the resource resource_properties dict Dictionary representing the Properties of the Resource code_property_key str Property Key of the code on the Resource Returns ------- str Representing the local code path """
codeuri = resource_properties.get(code_property_key, SamFunctionProvider._DEFAULT_CODEURI)
# CodeUri can be a dictionary of S3 Bucket/Key or a S3 URI, neither of which are supported
if isinstance(codeuri, dict) or \
        (isinstance(codeuri, six.string_types) and codeuri.startswith("s3://")):
    codeuri = SamFunctionProvider._DEFAULT_CODEURI
    LOG.warning("Lambda function '%s' has specified S3 location for CodeUri which is unsupported. "
                "Using default value of '%s' instead", name, codeuri)
return codeuri
<SYSTEM_TASK:> Extracts the Lambda Function Code from the Resource Properties <END_TASK> <USER_TASK:> Description: def _extract_lambda_function_code(resource_properties, code_property_key): """ Extracts the Lambda Function Code from the Resource Properties Parameters ---------- resource_properties dict Dictionary representing the Properties of the Resource code_property_key str Property Key of the code on the Resource Returns ------- str Representing the local code path """
codeuri = resource_properties.get(code_property_key, SamFunctionProvider._DEFAULT_CODEURI)

if isinstance(codeuri, dict):
    codeuri = SamFunctionProvider._DEFAULT_CODEURI

return codeuri
<SYSTEM_TASK:> Creates a list of Layer objects that are represented by the resources and the list of layers <END_TASK> <USER_TASK:> Description: def _parse_layer_info(list_of_layers, resources): """ Creates a list of Layer objects that are represented by the resources and the list of layers Parameters ---------- list_of_layers List(str) List of layers that are defined within the Layers Property on a function resources dict The Resources dictionary defined in a template Returns ------- List(samcli.commands.local.lib.provider.Layer) List of the Layer objects created from the template and layer list defined on the function. The order of the layers does not change. I.E: list_of_layers = ["layer1", "layer2"] the return would be [Layer("layer1"), Layer("layer2")] """
layers = [] for layer in list_of_layers: # If the layer is a string, assume it is the arn if isinstance(layer, six.string_types): layers.append(LayerVersion(layer, None)) continue # In the list of layers that is defined within a template, you can reference a LayerVersion resource. # When running locally, we need to follow that Ref so we can extract the local path to the layer code. if isinstance(layer, dict) and layer.get("Ref"): layer_logical_id = layer.get("Ref") layer_resource = resources.get(layer_logical_id) if not layer_resource or \ layer_resource.get("Type", "") not in (SamFunctionProvider._SERVERLESS_LAYER, SamFunctionProvider._LAMBDA_LAYER): raise InvalidLayerReference() layer_properties = layer_resource.get("Properties", {}) resource_type = layer_resource.get("Type") codeuri = None if resource_type == SamFunctionProvider._LAMBDA_LAYER: codeuri = SamFunctionProvider._extract_lambda_function_code(layer_properties, "Content") if resource_type == SamFunctionProvider._SERVERLESS_LAYER: codeuri = SamFunctionProvider._extract_sam_function_codeuri(layer_logical_id, layer_properties, "ContentUri") layers.append(LayerVersion(layer_logical_id, codeuri)) return layers
<SYSTEM_TASK:> Resolves the values from different sources and returns a dict of environment variables to use when running <END_TASK> <USER_TASK:> Description: def resolve(self): """ Resolves the values from different sources and returns a dict of environment variables to use when running the function locally. :return dict: Dict where key is the variable name and value is the value of the variable. Both key and values are strings """
# AWS_* variables must always be passed to the function, but user has the choice to override them
result = self._get_aws_variables()

# Default value for the variable gets lowest priority
for name, value in self.variables.items():

    # Shell environment values, second priority
    if name in self.shell_env_values:
        value = self.shell_env_values[name]

    # Overridden values, highest priority
    if name in self.override_values:
        value = self.override_values[name]

    # Any value must be a string when passed to Lambda runtime.
    # Runtime expects a Map<String, String> for environment variables
    result[name] = self._stringify_value(value)

return result
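Precedence sketch for the resolution above: template default < shell environment < explicit override. The names are made up, and the dict juggling only illustrates the ordering, not the SAM CLI API.

variables = {'TABLE_NAME': 'dev-table', 'STAGE': 'dev'}    # template defaults
shell_env = {'STAGE': 'beta'}                              # from the shell
overrides = {'TABLE_NAME': 'prod-table'}                   # from --env-vars

resolved = dict(variables)
resolved.update({k: v for k, v in shell_env.items() if k in variables})
resolved.update({k: v for k, v in overrides.items() if k in variables})
print(resolved)  # {'TABLE_NAME': 'prod-table', 'STAGE': 'beta'}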
<SYSTEM_TASK:> This method stringifies values of environment variables. If the value of the variable is a list or dictionary, <END_TASK> <USER_TASK:> Description: def _stringify_value(self, value): """ This method stringifies values of environment variables. If the value of the variable is a list or dictionary, then this method will replace it with an empty string. Values of environment variables in Lambda must be strings. List or dictionary usually means they are intrinsic functions which have not been resolved. :param value: Value to stringify :return string: Stringified value """
# List/dict/None values are replaced with a blank if isinstance(value, (dict, list, tuple)) or value is None: result = self._BLANK_VALUE # str(True) will output "True". To maintain backwards compatibility we need to output "true" or "false" elif value is True: result = "true" elif value is False: result = "false" # value is a scalar type like int, str which can be stringified # do not stringify unicode in Py2, Py3 str supports unicode elif sys.version_info.major > 2: result = str(value) elif not isinstance(value, unicode): # noqa: F821 pylint: disable=undefined-variable result = str(value) else: result = value return result
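The conversion rules above can be exercised with a small standalone helper; the blank-value constant is a stand-in for the class attribute, the sample inputs are invented, and Python 3 is assumed so the unicode branch is dropped.

_BLANK_VALUE = ""  # stand-in for the class-level constant

def stringify(value):
    if isinstance(value, (dict, list, tuple)) or value is None:
        return _BLANK_VALUE          # unresolved intrinsics and None collapse to a blank string
    if value is True:
        return "true"                # keep lowercase booleans for backwards compatibility
    if value is False:
        return "false"
    return str(value)                # ints, floats and strings pass through

print([stringify(v) for v in (5, True, None, {"Ref": "MyParam"}, "hello")])
# -> ['5', 'true', '', '', 'hello']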
<SYSTEM_TASK:> Removes a container that was created earlier. <END_TASK> <USER_TASK:> Description: def delete(self): """ Removes a container that was created earlier. """
if not self.is_created(): LOG.debug("Container was not created. Skipping deletion") return try: self.docker_client.containers\ .get(self.id)\ .remove(force=True) # Remove a container, even if it is running except docker.errors.NotFound: # Container is already not there LOG.debug("Container with ID %s does not exist. Skipping deletion", self.id) except docker.errors.APIError as ex: msg = str(ex) removal_in_progress = ("removal of container" in msg) and ("is already in progress" in msg) # When removal is already started, Docker API will throw an exception # Skip such exceptions. if not removal_in_progress: raise ex self.id = None
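For context, the same force-remove pattern written directly against the Docker SDK for Python looks roughly like this; the container ID is hypothetical.

import docker

client = docker.from_env()
container_id = "abc123"  # hypothetical ID of a previously created container

try:
    client.containers.get(container_id).remove(force=True)  # remove even if it is running
except docker.errors.NotFound:
    pass  # container is already gone, nothing to do
except docker.errors.APIError as ex:
    # Docker raises APIError if a removal is already in progress; that case is safe to ignore
    if "is already in progress" not in str(ex):
        raise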
<SYSTEM_TASK:> Calls Docker API to start the container. The container must have been created before it can be started. <END_TASK> <USER_TASK:> Description: def start(self, input_data=None): """ Calls Docker API to start the container. The container must have been created before it can be started. It waits for the container to complete, fetches both stdout and stderr logs and returns through the given streams. Parameters ---------- input_data Optional. Input data sent to the container through container's stdin. """
if input_data: raise ValueError("Passing input through container's stdin is not supported") if not self.is_created(): raise RuntimeError("Container does not exist. Cannot start this container") # Get the underlying container instance from Docker API real_container = self.docker_client.containers.get(self.id) # Start the container real_container.start()
<SYSTEM_TASK:> Based on the data returned from the Container output, via the iterator, write it to the appropriate streams <END_TASK> <USER_TASK:> Description: def _write_container_output(output_itr, stdout=None, stderr=None): """ Based on the data returned from the Container output, via the iterator, write it to the appropriate streams Parameters ---------- output_itr: Iterator Iterator returned by the Docker Attach command stdout: samcli.lib.utils.stream_writer.StreamWriter, optional Stream writer to write stdout data from Container into stderr: samcli.lib.utils.stream_writer.StreamWriter, optional Stream writer to write stderr data from the Container into """
# Iterator returns a tuple of (frame_type, data) where the frame type determines which stream we write output # to for frame_type, data in output_itr: if frame_type == Container._STDOUT_FRAME_TYPE and stdout: # Frame type 1 is stdout data. stdout.write(data) elif frame_type == Container._STDERR_FRAME_TYPE and stderr: # Frame type 2 is stderr data. stderr.write(data) else: # Either an unsupported frame type or stream for this frame type is not configured LOG.debug("Dropping Docker container output because of unconfigured frame type. " "Frame Type: %s. Data: %s", frame_type, data)
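A simplified sketch of routing (frame_type, data) tuples to two writers; the frame-type values mirror the comments above and the iterator is faked for the example.

import sys

STDOUT_FRAME, STDERR_FRAME = 1, 2  # assumed values of the class constants

def write_output(output_itr, stdout=None, stderr=None):
    for frame_type, data in output_itr:
        if frame_type == STDOUT_FRAME and stdout:
            stdout.write(data)
        elif frame_type == STDERR_FRAME and stderr:
            stderr.write(data)
        # anything else is dropped, matching the behaviour above

# Fake iterator standing in for the Docker attach output
frames = [(1, "hello from the function\n"), (2, "something went wrong\n")]
write_output(iter(frames), stdout=sys.stdout, stderr=sys.stderr)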
<SYSTEM_TASK:> \b <END_TASK> <USER_TASK:> Description: def cli(ctx, location, runtime, dependency_manager, output_dir, name, no_input): """ \b Initialize a serverless application with a SAM template, folder structure for your Lambda functions, connected to an event source such as APIs, S3 Buckets or DynamoDB Tables. This application includes everything you need to get started with serverless and eventually grow into a production scale application. \b This command can initialize a boilerplate serverless app. If you want to create your own template as well as use a custom location please take a look at our official documentation. \b Common usage: \b Initializes a new SAM project using Python 3.6 default template runtime \b $ sam init --runtime python3.6 \b Initializes a new SAM project using Java 8 and Gradle dependency manager \b $ sam init --runtime java8 --dependency-manager gradle \b Initializes a new SAM project using custom template in a Git/Mercurial repository \b # gh being expanded to github url $ sam init --location gh:aws-samples/cookiecutter-aws-sam-python \b $ sam init --location git+ssh://[email protected]/aws-samples/cookiecutter-aws-sam-python.git \b $ sam init --location hg+ssh://[email protected]/repo/template-name \b Initializes a new SAM project using custom template in a Zipfile \b $ sam init --location /path/to/template.zip \b $ sam init --location https://example.com/path/to/template.zip \b Initializes a new SAM project using custom template in a local path \b $ sam init --location /path/to/template/folder """
# All logic must be implemented in the `do_cli` method. This helps ease unit tests do_cli(ctx, location, runtime, dependency_manager, output_dir, name, no_input)
<SYSTEM_TASK:> Parses a swagger document and returns a list of APIs configured in the document. <END_TASK> <USER_TASK:> Description: def get_apis(self): """ Parses a swagger document and returns a list of APIs configured in the document. Swagger documents have the following structure { "/path1": { # path "get": { # method "x-amazon-apigateway-integration": { # integration "type": "aws_proxy", # URI contains the Lambda function ARN that needs to be parsed to get Function Name "uri": { "Fn::Sub": "arn:aws:apigateway:aws:lambda:path/2015-03-31/functions/${LambdaFunction.Arn}/..." } } }, "post": { }, }, "/path2": { ... } } Returns ------- list of samcli.commands.local.lib.provider.Api List of APIs that are configured in the Swagger document """
result = [] paths_dict = self.swagger.get("paths", {}) binary_media_types = self.get_binary_media_types() for full_path, path_config in paths_dict.items(): for method, method_config in path_config.items(): function_name = self._get_integration_function_name(method_config) if not function_name: LOG.debug("Lambda function integration not found in Swagger document at path='%s' method='%s'", full_path, method) continue if method.lower() == self._ANY_METHOD_EXTENSION_KEY: # Convert to a more commonly used method notation method = self._ANY_METHOD api = Api(path=full_path, method=method, function_name=function_name, cors=None, binary_media_types=binary_media_types) result.append(api) return result
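Given a ``paths`` dictionary shaped like the docstring example, the extraction loop boils down to something like the sketch below; the sample document and the ARN are placeholders, and the integration-URI parsing is left out.

_INTEGRATION_KEY = "x-amazon-apigateway-integration"

def list_lambda_routes(swagger_doc):
    """Yield (path, method, uri) for every method that has an aws_proxy integration."""
    for path, path_config in swagger_doc.get("paths", {}).items():
        for method, method_config in path_config.items():
            integration = method_config.get(_INTEGRATION_KEY, {})
            if isinstance(integration, dict) and integration.get("type") == "aws_proxy":
                yield path, method.upper(), integration.get("uri")

doc = {"paths": {"/hello": {"get": {
    "x-amazon-apigateway-integration": {"type": "aws_proxy", "uri": "arn:aws:apigateway:..."}}}}}
print(list(list_lambda_routes(doc)))  # -> [('/hello', 'GET', 'arn:aws:apigateway:...')]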
<SYSTEM_TASK:> Tries to parse the Lambda Function name from the Integration defined in the method configuration. <END_TASK> <USER_TASK:> Description: def _get_integration_function_name(self, method_config): """ Tries to parse the Lambda Function name from the Integration defined in the method configuration. Integration configuration is defined under the special "x-amazon-apigateway-integration" key. We care only about Lambda integrations, which are of type aws_proxy, and ignore the rest. Integration URI is complex and hard to parse. Hence we do our best to extract function name out of integration URI. If not possible, we return None. Parameters ---------- method_config : dict Dictionary containing the method configuration which might contain integration settings Returns ------- string or None Lambda function name, if possible. None, if not. """
if not isinstance(method_config, dict) or self._INTEGRATION_KEY not in method_config: return None integration = method_config[self._INTEGRATION_KEY] if integration \ and isinstance(integration, dict) \ and integration.get("type") == IntegrationType.aws_proxy.value: # Integration must be "aws_proxy" otherwise we don't care about it return LambdaUri.get_function_name(integration.get("uri"))
<SYSTEM_TASK:> Formats the given CloudWatch Logs Event dictionary as necessary and returns an iterable that will <END_TASK> <USER_TASK:> Description: def do_format(self, event_iterable): """ Formats the given CloudWatch Logs Event dictionary as necessary and returns an iterable that will return the formatted string. This can be used to parse and format the events based on context, e.g. in Lambda Function logs, a formatter may wish to color the "ERROR" keywords red, or highlight a filter keyword separately, etc. This method takes an iterable as input and returns an iterable. It does not immediately format the event. Instead, it sets up the formatter chain appropriately and returns the iterable. Actual formatting happens only when the iterable is used by the caller. Parameters ---------- event_iterable : iterable of samcli.lib.logs.event.LogEvent Iterable that returns an object containing information about each log event. Returns ------- iterable of string Iterable that returns a formatted event as a string. """
for operation in self.formatter_chain: # Make sure the operation has access to certain basic objects like colored partial_op = functools.partial(operation, colored=self.colored) event_iterable = imap(partial_op, event_iterable) return event_iterable
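The lazy chaining trick (wrap each formatter with functools.partial and stack map calls so nothing runs until the caller iterates) can be shown with plain functions; the formatters here are toy stand-ins, not the real formatter chain.

import functools

def add_prefix(event, colored=None):
    return "[sam] " + event

def shout(event, colored=None):
    return event.upper()

def chain(formatter_chain, events, colored=None):
    for operation in formatter_chain:
        partial_op = functools.partial(operation, colored=colored)
        events = map(partial_op, events)   # still lazy; nothing is formatted yet
    return events

formatted = chain([add_prefix, shout], iter(["started", "done"]))
print(list(formatted))  # -> ['[SAM] STARTED', '[SAM] DONE']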
<SYSTEM_TASK:> Basic formatter to convert an event object to string <END_TASK> <USER_TASK:> Description: def _pretty_print_event(event, colored): """ Basic formatter to convert an event object to string """
event.timestamp = colored.yellow(event.timestamp) event.log_stream_name = colored.cyan(event.log_stream_name) return ' '.join([event.log_stream_name, event.timestamp, event.message])
<SYSTEM_TASK:> Highlight the keyword in the log statement by drawing an underline <END_TASK> <USER_TASK:> Description: def highlight_keywords(self, event, colored): """ Highlight the keyword in the log statement by drawing an underline """
if self.keyword: highlight = colored.underline(self.keyword) event.message = event.message.replace(self.keyword, highlight) return event
<SYSTEM_TASK:> If the event message is a JSON string, then pretty print the JSON with a 2-space indent. This makes <END_TASK> <USER_TASK:> Description: def format_json(event, colored): """ If the event message is a JSON string, then pretty print the JSON with a 2-space indent. This makes it very easy to visually parse and search JSON data """
try: if event.message.startswith("{"): msg_dict = json.loads(event.message) event.message = json.dumps(msg_dict, indent=2) except Exception: # Skip if the event message was not JSON pass return event
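The same guard-and-prettify step works on any plain string, which is why non-JSON messages pass through untouched; the log lines below are invented.

import json

def prettify(message):
    try:
        if message.startswith("{"):
            return json.dumps(json.loads(message), indent=2)
    except Exception:
        pass  # not valid JSON after all; leave the message as-is
    return message

print(prettify('{"level": "ERROR", "msg": "boom"}'))   # printed over several indented lines
print(prettify("plain text log line"))                  # printed unchanged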
<SYSTEM_TASK:> If the given ``path`` is a relative path, then assume it is relative to ``original_root``. This method will <END_TASK> <USER_TASK:> Description: def _resolve_relative_to(path, original_root, new_root): """ If the given ``path`` is a relative path, then assume it is relative to ``original_root``. This method will resolve the path relative to ``new_root`` instead and return the updated path. Examples ------- # Assume a file called template.txt at location /tmp/original/root/template.txt expressed as relative path # We are trying to update it to be relative to /tmp/new/root instead of the /tmp/original/root >>> result = _resolve_relative_to("template.txt", \ "/tmp/original/root", \ "/tmp/new/root") >>> result ../../original/root/template.txt Returns ------- Updated path if the given path is a relative path. None, if the path is not a relative path. """
if not isinstance(path, six.string_types) \ or path.startswith("s3://") \ or os.path.isabs(path): # Value is definitely NOT a relative path. It is either a S3 URi or Absolute path or not a string at all return None # Value is definitely a relative path. Change it relative to the destination directory return os.path.relpath( os.path.normpath(os.path.join(original_root, path)), # Absolute original path w.r.t ``original_root`` new_root)
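Concretely, the relpath/normpath combination behaves like this on POSIX paths; the directories are invented.

import os

def resolve_relative_to(path, original_root, new_root):
    if not isinstance(path, str) or path.startswith("s3://") or os.path.isabs(path):
        return None  # S3 URIs and absolute paths are left untouched
    return os.path.relpath(os.path.normpath(os.path.join(original_root, path)), new_root)

print(resolve_relative_to("template.txt", "/tmp/original/root", "/tmp/new/root"))
# -> ../../original/root/template.txt
print(resolve_relative_to("s3://bucket/swagger.yaml", "/tmp/original/root", "/tmp/new/root"))
# -> None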
<SYSTEM_TASK:> Gets the Swagger document from either of the given locations. If we fail to retrieve or parse the Swagger <END_TASK> <USER_TASK:> Description: def read(self): """ Gets the Swagger document from either of the given locations. If we fail to retrieve or parse the Swagger file, this method will return None. Returns ------- dict: Swagger document. None, if we cannot retrieve the document """
swagger = None # First check if there is inline swagger if self.definition_body: swagger = self._read_from_definition_body() if not swagger and self.definition_uri: # If not, then try to download it from the given URI swagger = self._download_swagger(self.definition_uri) return swagger
<SYSTEM_TASK:> Download the file from given local or remote location and return it <END_TASK> <USER_TASK:> Description: def _download_swagger(self, location): """ Download the file from given local or remote location and return it Parameters ---------- location : str or dict Local path or S3 path to Swagger file to download. Consult the ``__init__.py`` documentation for specifics on structure of this property. Returns ------- dict or None Downloaded and parsed Swagger document. None, if unable to download """
if not location: return bucket, key, version = self._parse_s3_location(location) if bucket and key: LOG.debug("Downloading Swagger document from Bucket=%s, Key=%s, Version=%s", bucket, key, version) swagger_str = self._download_from_s3(bucket, key, version) return yaml_parse(swagger_str) if not isinstance(location, string_types): # This is not a string and not a S3 Location dictionary. Probably something invalid LOG.debug("Unable to download Swagger file. Invalid location: %s", location) return # ``location`` is a string and not a S3 path. It is probably a local path. Let's resolve relative path if any filepath = location if self.working_dir: # Resolve relative paths, if any, with respect to working directory filepath = os.path.join(self.working_dir, location) if not os.path.exists(filepath): LOG.debug("Unable to download Swagger file. File not found at location %s", filepath) return LOG.debug("Reading Swagger document from local file at %s", filepath) with open(filepath, "r") as fp: return yaml_parse(fp.read())
<SYSTEM_TASK:> Download a file from given S3 location, if available. <END_TASK> <USER_TASK:> Description: def _download_from_s3(bucket, key, version=None): """ Download a file from given S3 location, if available. Parameters ---------- bucket : str S3 Bucket name key : str S3 Bucket Key aka file path version : str Optional Version ID of the file Returns ------- str Contents of the file that was downloaded Raises ------ botocore.exceptions.ClientError if we were unable to download the file from S3 """
s3 = boto3.client('s3') extra_args = {} if version: extra_args["VersionId"] = version with tempfile.TemporaryFile() as fp: try: s3.download_fileobj( bucket, key, fp, ExtraArgs=extra_args) # go to start of file fp.seek(0) # Read and return all the contents return fp.read() except botocore.exceptions.ClientError: LOG.error("Unable to download Swagger document from S3 Bucket=%s Key=%s Version=%s", bucket, key, version) raise
<SYSTEM_TASK:> Generates project using cookiecutter and options given <END_TASK> <USER_TASK:> Description: def generate_project( location=None, runtime="nodejs", dependency_manager=None, output_dir=".", name='sam-sample-app', no_input=False): """Generates project using cookiecutter and options given Generate project scaffolds a project using default templates if the user doesn't provide one via the location parameter. Default templates are automatically chosen depending on the runtime given by the user. Parameters ---------- location: Path, optional Git, HTTP, Local path or Zip containing cookiecutter template (the default is None, which means no custom template) runtime: str, optional Lambda Runtime (the default is "nodejs", which creates a nodejs project) dependency_manager: str, optional Dependency Manager for the Lambda Runtime Project (the default is "npm" for a "nodejs" Lambda runtime) output_dir: str, optional Output directory where project should be generated (the default is ".", which implies current folder) name: str, optional Name of the project (the default is "sam-sample-app", which implies a project named sam-sample-app will be created) no_input : bool, optional Whether to prompt for input or to accept default values (the default is False, which prompts the user for values it doesn't know for baking) Raises ------ GenerateProjectFailedError If the process of baking a project fails """
template = None for mapping in list(itertools.chain(*(RUNTIME_DEP_TEMPLATE_MAPPING.values()))): if runtime in mapping['runtimes'] or any([r.startswith(runtime) for r in mapping['runtimes']]): if not dependency_manager: template = mapping['init_location'] break elif dependency_manager == mapping['dependency_manager']: template = mapping['init_location'] if not template: msg = "Lambda Runtime {} does not support dependency manager: {}".format(runtime, dependency_manager) raise GenerateProjectFailedError(project=name, provider_error=msg) params = { "template": location if location else template, "output_dir": output_dir, "no_input": no_input } LOG.debug("Parameters dict created with input given") LOG.debug("%s", params) if not location and name is not None: params['extra_context'] = {'project_name': name, 'runtime': runtime} params['no_input'] = True LOG.debug("Parameters dict updated with project name as extra_context") LOG.debug("%s", params) try: LOG.debug("Baking a new template with cookiecutter with all parameters") cookiecutter(**params) except CookiecutterException as e: raise GenerateProjectFailedError(project=name, provider_error=e)
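For reference, a direct cookiecutter call with the same kinds of parameters would look roughly like the following; the template location is a placeholder and the extra_context keys depend entirely on what the chosen template's cookiecutter.json expects.

from cookiecutter.main import cookiecutter

cookiecutter(
    "gh:aws-samples/cookiecutter-aws-sam-python",   # placeholder template location
    output_dir=".",
    no_input=True,
    extra_context={"project_name": "sam-sample-app", "runtime": "python3.6"},
)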
<SYSTEM_TASK:> Convert the given date to UTC, if the date contains a timezone. <END_TASK> <USER_TASK:> Description: def to_utc(some_time): """ Convert the given date to UTC, if the date contains a timezone. Parameters ---------- some_time : datetime.datetime datetime object to convert to UTC Returns ------- datetime.datetime Converted datetime object """
# Convert timezone aware objects to UTC if some_time.tzinfo and some_time.utcoffset(): some_time = some_time.astimezone(tzutc()) # Now that time is UTC, simply remove the timezone component. return some_time.replace(tzinfo=None)
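A quick worked example of the conversion with an offset-aware timestamp; the input time is arbitrary.

import datetime
from dateutil.tz import tzoffset, tzutc

def to_utc(some_time):
    if some_time.tzinfo and some_time.utcoffset():
        some_time = some_time.astimezone(tzutc())
    return some_time.replace(tzinfo=None)

# 10:00 at UTC-05:00 becomes 15:00 naive UTC
aware = datetime.datetime(2019, 1, 1, 10, 0, tzinfo=tzoffset(None, -5 * 3600))
print(to_utc(aware))  # -> 2019-01-01 15:00:00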
<SYSTEM_TASK:> Parse the given string as a datetime object. This parser supports almost any string format. <END_TASK> <USER_TASK:> Description: def parse_date(date_string): """ Parse the given string as a datetime object. This parser supports almost any string format. For relative times, like `10min ago`, this parser computes the actual time relative to the current UTC time. This allows time to always be in UTC if an explicit time zone is not provided. Parameters ---------- date_string : str String representing the date Returns ------- datetime.datetime Parsed datetime object. None, if the string cannot be parsed. """
parser_settings = { # Relative times like '10m ago' must subtract from the current UTC time. Without this setting, dateparser # will use current local time as the base for subtraction, but falsely assume it is a UTC time. Therefore # the time that dateparser returns will be a `datetime` object that did not have any timezone information. # So be explicit to set the time to UTC. "RELATIVE_BASE": datetime.datetime.utcnow() } return dateparser.parse(date_string, settings=parser_settings)
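Usage with both a relative and an absolute string might look like this; the parsed value of the relative string naturally depends on when the snippet runs.

import datetime
import dateparser

settings = {"RELATIVE_BASE": datetime.datetime.utcnow()}

print(dateparser.parse("10min ago", settings=settings))            # roughly 10 minutes before the current UTC time
print(dateparser.parse("2019-01-01 12:00:00", settings=settings))  # -> 2019-01-01 12:00:00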
<SYSTEM_TASK:> Returns name of the function to invoke. If no function identifier is provided, this method will return name of <END_TASK> <USER_TASK:> Description: def function_name(self): """ Returns name of the function to invoke. If no function identifier is provided, this method will return name of the only function from the template :return string: Name of the function :raises InvokeContextException: If function identifier is not provided """
if self._function_identifier: return self._function_identifier # Function Identifier is *not* provided. If there is only one function in the template, # default to it. all_functions = [f for f in self._function_provider.get_all()] if len(all_functions) == 1: return all_functions[0].name # Get all the available function names to print helpful exception message all_function_names = [f.name for f in all_functions] # There are more functions in the template, and function identifier is not provided, hence raise. raise InvokeContextException("You must provide a function identifier (function's Logical ID in the template). " "Possible options in your template: {}".format(all_function_names))
<SYSTEM_TASK:> Returns an instance of the runner capable of running Lambda functions locally <END_TASK> <USER_TASK:> Description: def local_lambda_runner(self): """ Returns an instance of the runner capable of running Lambda functions locally :return samcli.commands.local.lib.local_lambda.LocalLambdaRunner: Runner configured to run Lambda functions locally """
layer_downloader = LayerDownloader(self._layer_cache_basedir, self.get_cwd()) image_builder = LambdaImage(layer_downloader, self._skip_pull_image, self._force_image_build) lambda_runtime = LambdaRuntime(self._container_manager, image_builder) return LocalLambdaRunner(local_runtime=lambda_runtime, function_provider=self._function_provider, cwd=self.get_cwd(), env_vars_values=self._env_vars_value, debug_context=self._debug_context)
<SYSTEM_TASK:> Returns stream writer for stdout to output Lambda function logs to <END_TASK> <USER_TASK:> Description: def stdout(self): """ Returns stream writer for stdout to output Lambda function logs to Returns ------- samcli.lib.utils.stream_writer.StreamWriter Stream writer for stdout """
stream = self._log_file_handle if self._log_file_handle else osutils.stdout() return StreamWriter(stream, self._is_debugging)
<SYSTEM_TASK:> Returns stream writer for stderr to output Lambda function errors to <END_TASK> <USER_TASK:> Description: def stderr(self): """ Returns stream writer for stderr to output Lambda function errors to Returns ------- samcli.lib.utils.stream_writer.StreamWriter Stream writer for stderr """
stream = self._log_file_handle if self._log_file_handle else osutils.stderr() return StreamWriter(stream, self._is_debugging)
<SYSTEM_TASK:> Get the working directory. This is usually relative to the directory that contains the template. If a Docker <END_TASK> <USER_TASK:> Description: def get_cwd(self): """ Get the working directory. This is usually relative to the directory that contains the template. If a Docker volume location is specified, it takes precedence. All Lambda function code paths are resolved relative to this working directory :return string: Working directory """
cwd = os.path.dirname(os.path.abspath(self._template_file)) if self._docker_volume_basedir: cwd = self._docker_volume_basedir return cwd
<SYSTEM_TASK:> If the user provided a file containing values of environment variables, this method will read the file and <END_TASK> <USER_TASK:> Description: def _get_env_vars_value(filename): """ If the user provided a file containing values of environment variables, this method will read the file and return its value :param string filename: Path to file containing environment variable values :return dict: Value of environment variables, if provided. None otherwise :raises InvokeContextException: If the file was not found or not a valid JSON """
if not filename: return None # Try to read the file and parse it as JSON try: with open(filename, 'r') as fp: return json.load(fp) except Exception as ex: raise InvokeContextException("Could not read environment variables overrides from file {}: {}".format( filename, str(ex)))
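A hypothetical overrides file would just be JSON; the nesting of variables under a function's logical ID follows the documented --env-vars layout, but the names and values here are illustrative.

import json

overrides = json.loads('''
{
    "HelloWorldFunction": {
        "TABLE_NAME": "test-table",
        "STAGE": "dev"
    }
}
''')
print(overrides["HelloWorldFunction"]["TABLE_NAME"])  # -> test-table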
<SYSTEM_TASK:> Creates a DebugContext if the InvokeContext is in a debugging mode <END_TASK> <USER_TASK:> Description: def _get_debug_context(debug_port, debug_args, debugger_path): """ Creates a DebugContext if the InvokeContext is in a debugging mode Parameters ---------- debug_port int Port to bind the debugger to debug_args str Additional arguments passed to the debugger debugger_path str Path to the directory of the debugger to mount on Docker Returns ------- samcli.commands.local.lib.debug_context.DebugContext Object representing the DebugContext Raises ------ samcli.commands.local.cli_common.user_exceptions.DebugContextException When the debugger_path is not valid """
if debug_port and debugger_path: try: debugger = Path(debugger_path).resolve(strict=True) except OSError as error: if error.errno == errno.ENOENT: raise DebugContextException("'{}' could not be found.".format(debugger_path)) else: raise error # We turn off pylint here due to https://github.com/PyCQA/pylint/issues/1660 if not debugger.is_dir(): # pylint: disable=no-member raise DebugContextException("'{}' should be a directory with the debugger in it.".format(debugger_path)) debugger_path = str(debugger) return DebugContext(debug_port=debug_port, debug_args=debug_args, debugger_path=debugger_path)