Dataset columns:
- _id: string, 2-7 characters
- title: string, 1-88 characters
- partition: string, 3 classes
- text: string, 75 characters to 19.8k characters
- language: string, 1 class
- meta_information: dict
q2500
RepoResultManager.add_git
train
def add_git(meta, git_info):
    """Enrich the result meta information with commit data."""
    meta["hexsha"] = git_info.hexsha
    meta["author"] = git_info.author
    meta["email"] = git_info.email
    meta["authored_on"] = git_info.authored_on.isoformat(" ")
python
{ "resource": "" }
q2501
RepoResultManager.load
train
def load(self, commit=None):
    """Load a result from the storage directory."""
    git_info = self.record_git_info(commit)
    LOGGER.debug("Loading the result for commit '%s'.", git_info.hexsha)
    filename = self.get_filename(git_info)
    LOGGER.debug("Loading the result '%s'.", filename)
    result = super(RepoResultManager, self).load(filename)
    self.add_git(result.meta, git_info)
    return result
python
{ "resource": "" }
q2502
MemoteExtension.normalize
train
def normalize(filename):
    """Return an absolute path of the given file name."""
    # Default value means we do not resolve a model file.
    if filename == "default":
        return filename
    filename = expanduser(filename)
    if isabs(filename):
        return filename
    else:
        return join(os.getcwd(), filename)
python
{ "resource": "" }
q2503
GrowthExperiment.evaluate
train
def evaluate(self, model, threshold=0.1):
    """Evaluate in silico growth rates."""
    with model:
        if self.medium is not None:
            self.medium.apply(model)
        if self.objective is not None:
            model.objective = self.objective
        model.add_cons_vars(self.constraints)
        threshold *= model.slim_optimize()
        growth = list()
        for row in self.data.itertuples(index=False):
            with model:
                exchange = model.reactions.get_by_id(row.exchange)
                if bool(exchange.reactants):
                    exchange.lower_bound = -row.uptake
                else:
                    exchange.upper_bound = row.uptake
                growth.append(model.slim_optimize() >= threshold)
    return DataFrame({
        "exchange": self.data["exchange"],
        "growth": growth
    })
python
{ "resource": "" }
q2504
BJSON.process_bind_param
train
def process_bind_param(self, value, dialect):
    """Convert the value to a JSON encoded string before storing it."""
    try:
        with BytesIO() as stream:
            with GzipFile(fileobj=stream, mode="wb") as file_handle:
                file_handle.write(
                    jsonify(value, pretty=False).encode("utf-8")
                )
            output = stream.getvalue()
        return output
    except TypeError as error:
        log_json_incompatible_types(value)
        raise_with_traceback(error)
python
{ "resource": "" }
q2505
BJSON.process_result_value
train
def process_result_value(self, value, dialect):
    """Convert a JSON encoded string to a dictionary structure."""
    if value is not None:
        with BytesIO(value) as stream:
            with GzipFile(fileobj=stream, mode="rb") as file_handle:
                value = json.loads(file_handle.read().decode("utf-8"))
    return value
python
{ "resource": "" }
q2506
ZXCVBNValidator.validate
train
def validate(self, password, user=None):
    """Validate method, run zxcvbn and check score."""
    user_inputs = []
    if user is not None:
        for attribute in self.user_attributes:
            if hasattr(user, attribute):
                user_inputs.append(getattr(user, attribute))
    results = zxcvbn(password, user_inputs=user_inputs)
    if results.get('score', 0) < self.min_score:
        feedback = ', '.join(
            results.get('feedback', {}).get('suggestions', []))
        raise ValidationError(_(feedback), code=self.code, params={})
python
{ "resource": "" }
q2507
_get_html_contents
train
def _get_html_contents(html):
    """Process an HTML block and detect whether it is a code block,
    a math block, or a regular HTML block."""
    parser = MyHTMLParser()
    parser.feed(html)
    if parser.is_code:
        return ('code', parser.data.strip())
    elif parser.is_math:
        return ('math', parser.data.strip())
    else:
        return '', ''
python
{ "resource": "" }
q2508
_is_path
train
def _is_path(s):
    """Return whether an object is a path."""
    if isinstance(s, string_types):
        try:
            return op.exists(s)
        except (OSError, ValueError):
            return False
    else:
        return False
python
{ "resource": "" }
q2509
FormatManager.format_manager
train
def format_manager(cls):
    """Return the instance singleton, creating it if necessary."""
    if cls._instance is None:
        # Discover the formats and register them with a new singleton.
        cls._instance = cls().register_entrypoints()
    return cls._instance
python
{ "resource": "" }
q2510
FormatManager.register_entrypoints
train
def register_entrypoints(self):
    """Look through the `setuptools` `entry_points` and load all of
    the formats.
    """
    for spec in iter_entry_points(self.entry_point_group):
        format_properties = {"name": spec.name}
        try:
            format_properties.update(spec.load())
        except (DistributionNotFound, ImportError) as err:
            self.log.info(
                "ipymd format {} could not be loaded: {}".format(
                    spec.name, err))
            continue
        self.register(**format_properties)
    return self
python
{ "resource": "" }
q2511
FormatManager.format_from_extension
train
def format_from_extension(self, extension):
    """Find a format from its extension."""
    formats = [name for name, format in self._formats.items()
               if format.get('file_extension', None) == extension]
    if len(formats) == 0:
        return None
    elif len(formats) > 1:
        raise RuntimeError("Several formats are registered with "
                           "that extension; please specify the format "
                           "explicitly.")
    else:
        return formats[0]
python
{ "resource": "" }
q2512
FormatManager.load
train
def load(self, file, name=None):
    """Load a file. The format name can be specified explicitly or
    inferred from the file extension."""
    if name is None:
        name = self.format_from_extension(op.splitext(file)[1])
    file_format = self.file_type(name)
    if file_format == 'text':
        return _read_text(file)
    elif file_format == 'json':
        return _read_json(file)
    else:
        load_function = self._formats[name].get('load', None)
        if load_function is None:
            raise IOError("The format must declare a file type or "
                          "load/save functions.")
        return load_function(file)
python
{ "resource": "" }
q2513
FormatManager.save
train
def save(self, file, contents, name=None, overwrite=False):
    """Save contents into a file. The format name can be specified
    explicitly or inferred from the file extension."""
    if name is None:
        name = self.format_from_extension(op.splitext(file)[1])
    file_format = self.file_type(name)
    if file_format == 'text':
        _write_text(file, contents)
    elif file_format == 'json':
        _write_json(file, contents)
    else:
        write_function = self._formats[name].get('save', None)
        if write_function is None:
            raise IOError("The format must declare a file type or "
                          "load/save functions.")
        if op.exists(file) and not overwrite:
            print("The file already exists, please use overwrite=True.")
            return
        write_function(file, contents)
python
{ "resource": "" }
q2514
FormatManager.create_reader
train
def create_reader(self, name, *args, **kwargs):
    """Create a new reader instance for a given format."""
    self._check_format(name)
    return self._formats[name]['reader'](*args, **kwargs)
python
{ "resource": "" }
q2515
FormatManager.create_writer
train
def create_writer(self, name, *args, **kwargs):
    """Create a new writer instance for a given format."""
    self._check_format(name)
    return self._formats[name]['writer'](*args, **kwargs)
python
{ "resource": "" }
q2516
FormatManager.convert
train
def convert(self, contents_or_path, from_=None, to=None, reader=None, writer=None, from_kwargs=None, to_kwargs=None, ): """Convert contents between supported formats. Parameters ---------- contents : str The contents to convert from. from_ : str or None The name of the source format. If None, this is the ipymd_cells format. to : str or None The name of the target format. If None, this is the ipymd_cells format. reader : a Reader instance or None writer : a Writer instance or None from_kwargs : dict Optional keyword arguments to pass to the reader instance. to_kwargs : dict Optional keyword arguments to pass to the writer instance. """ # Load the file if 'contents_or_path' is a path. if _is_path(contents_or_path): contents = self.load(contents_or_path, from_) else: contents = contents_or_path if from_kwargs is None: from_kwargs = {} if to_kwargs is None: to_kwargs = {} if reader is None: reader = (self.create_reader(from_, **from_kwargs) if from_ is not None else None) if writer is None: writer = (self.create_writer(to, **to_kwargs) if to is not None else None) if reader is not None: # Convert from the source format to ipymd cells. cells = [cell for cell in reader.read(contents)] else: # If no reader is specified, 'contents' is assumed to already be # a list of ipymd cells. cells = contents notebook_metadata = [cell for cell in cells if cell["cell_type"] == "notebook_metadata"] if writer is not None: if notebook_metadata: [cells.remove(cell) for cell in notebook_metadata] notebook_metadata = self.clean_meta( notebook_metadata[0]["metadata"] ) if hasattr(writer, "write_notebook_metadata"): writer.write_notebook_metadata(notebook_metadata) else: print("{} does not support notebook metadata, " "dropping metadata: {}".format( writer, notebook_metadata)) # Convert from ipymd cells to the target format. for cell in cells: meta = self.clean_cell_meta(cell.get("metadata", {})) if not meta: cell.pop("metadata", None) writer.write(cell) return writer.contents else: # If no writer is specified, the output is supposed to be # a list of ipymd cells. return cells
python
{ "resource": "" }
q2517
FormatManager.clean_meta
train
def clean_meta(self, meta):
    """Removes unwanted metadata.

    Parameters
    ----------
    meta : dict
        Notebook metadata.
    """
    if not self.verbose_metadata:
        default_kernel_name = (self.default_kernel_name or
                               self._km.kernel_name)
        if (meta.get("kernelspec", {})
                .get("name", None) == default_kernel_name):
            del meta["kernelspec"]
        meta.pop("language_info", None)
    return meta
python
{ "resource": "" }
q2518
FormatManager.clean_cell_meta
train
def clean_cell_meta(self, meta):
    """Remove cell metadata that matches the default cell metadata."""
    for k, v in DEFAULT_CELL_METADATA.items():
        if meta.get(k, None) == v:
            meta.pop(k, None)
    return meta
python
{ "resource": "" }
q2519
_starts_with_regex
train
def _starts_with_regex(line, regex):
    """Return whether a line starts with a regex or not."""
    if not regex.startswith('^'):
        regex = '^' + regex
    reg = re.compile(regex)
    return reg.match(line)
python
{ "resource": "" }
q2520
create_prompt
train
def create_prompt(prompt):
    """Create a prompt manager.

    Parameters
    ----------
    prompt : str or class deriving from BasePromptManager
        The prompt name ('python' or 'ipython') or a custom
        PromptManager class.
    """
    if prompt is None:
        prompt = 'python'
    if prompt == 'python':
        prompt = PythonPromptManager
    elif prompt == 'ipython':
        prompt = IPythonPromptManager
    # Instantiate the class.
    if isinstance(prompt, BasePromptManager):
        return prompt
    else:
        return prompt()
python
{ "resource": "" }
q2521
BasePromptManager.split_input_output
train
def split_input_output(self, text):
    """Split code into input lines and output lines, according to the
    input and output prompt templates."""
    lines = _to_lines(text)
    i = 0
    for line in lines:
        if _starts_with_regex(line, self.input_prompt_regex):
            i += 1
        else:
            break
    return lines[:i], lines[i:]
python
{ "resource": "" }
q2522
_split_python
train
def _split_python(python):
    """Split Python source into chunks.

    Chunks are separated by at least two return lines. The break must
    not be followed by a space. Also, long Python strings spanning
    several lines are not split.
    """
    python = _preprocess(python)
    if not python:
        return []
    lexer = PythonSplitLexer()
    lexer.read(python)
    return lexer.chunks
python
{ "resource": "" }
q2523
_is_chunk_markdown
train
def _is_chunk_markdown(source):
    """Return whether a chunk contains Markdown contents."""
    lines = source.splitlines()
    if all(line.startswith('# ') for line in lines):
        # The chunk is Markdown *unless* it is commented Python code.
        source = '\n'.join(line[2:] for line in lines
                           if not line[2:].startswith('#'))  # skip headers
        if not source:
            return True
        # Try to parse the chunk: if it fails, it is Markdown, otherwise,
        # it is Python.
        return not _is_python(source)
    return False
python
{ "resource": "" }
q2524
_filter_markdown
train
def _filter_markdown(source, filters):
    """Only keep some Markdown headers from a Markdown string."""
    lines = source.splitlines()
    # Filters is a list of 'hN' strings where 1 <= N <= 6.
    headers = [_replace_header_filter(filter) for filter in filters]
    lines = [line for line in lines if line.startswith(tuple(headers))]
    return '\n'.join(lines)
python
{ "resource": "" }
q2525
BlockLexer.parse_lheading
train
def parse_lheading(self, m):
    """Parse setext heading."""
    level = 1 if m.group(2) == '=' else 2
    self.renderer.heading(m.group(1), level=level)
python
{ "resource": "" }
q2526
MarkdownWriter.ensure_newline
train
def ensure_newline(self, n):
    """Make sure there are 'n' line breaks at the end."""
    assert n >= 0
    text = self._output.getvalue().rstrip('\n')
    if not text:
        return
    self._output = StringIO()
    self._output.write(text)
    self._output.write('\n' * n)
    text = self._output.getvalue()
    assert text[-n-1] != '\n'
    assert text[-n:] == '\n' * n
python
{ "resource": "" }
q2527
BaseMarkdownReader._meta_from_regex
train
def _meta_from_regex(self, m): """Extract and parse YAML metadata from a meta match Notebook metadata must appear at the beginning of the file and follows the Jekyll front-matter convention of dashed delimiters: --- some: yaml --- Cell metadata follows the YAML spec of dashes and periods --- some: yaml ... Both must be followed by at least one blank line (\n\n). """ body = m.group('body') is_notebook = m.group('sep_close') == '---' if is_notebook: # make it into a valid YAML object by stripping --- body = body.strip()[:-3] + '...' try: if body: return self._meta(yaml.safe_load(m.group('body')), is_notebook) else: return self._meta({'ipymd': {'empty_meta': True}}, is_notebook) except Exception as err: raise Exception(body, err)
python
{ "resource": "" }
q2528
MarkdownReader._code_cell
train
def _code_cell(self, source):
    """Split the source into input and output."""
    input, output = self._prompt.to_cell(source)
    return {'cell_type': 'code',
            'input': input,
            'output': output}
python
{ "resource": "" }
q2529
_preprocess
train
def _preprocess(text, tab=4):
    """Normalize a text."""
    text = re.sub(r'\r\n|\r', '\n', text)
    text = text.replace('\t', ' ' * tab)
    text = text.replace('\u00a0', ' ')
    text = text.replace('\u2424', '\n')
    pattern = re.compile(r'^ +$', re.M)
    text = pattern.sub('', text)
    text = _rstrip_lines(text)
    return text
python
{ "resource": "" }
q2530
_diff
train
def _diff(text_0, text_1):
    """Return a diff between two strings."""
    diff = difflib.ndiff(text_0.splitlines(), text_1.splitlines())
    return _diff_removed_lines(diff)
python
{ "resource": "" }
q2531
_write_json
train
def _write_json(file, contents):
    """Write a dict to a JSON file."""
    with open(file, 'w') as f:
        return json.dump(contents, f, indent=2, sort_keys=True)
python
{ "resource": "" }
q2532
_numbered_style
train
def _numbered_style():
    """Create a numbered list style."""
    style = ListStyle(name='_numbered_list')
    lls = ListLevelStyleNumber(level=1)
    lls.setAttribute('displaylevels', 1)
    lls.setAttribute('numsuffix', '. ')
    lls.setAttribute('numformat', '1')
    llp = ListLevelProperties()
    llp.setAttribute('listlevelpositionandspacemode', 'label-alignment')
    llla = ListLevelLabelAlignment(labelfollowedby='listtab')
    llla.setAttribute('listtabstopposition', '1.27cm')
    llla.setAttribute('textindent', '-0.635cm')
    llla.setAttribute('marginleft', '1.27cm')
    llp.addElement(llla)
    # llp.setAttribute('spacebefore', '')
    # llp.setAttribute('minlabelwidth', '')
    lls.addElement(llp)
    style.addElement(lls)
    return style
python
{ "resource": "" }
q2533
_create_style
train
def _create_style(name, family=None, **kwargs):
    """Helper function for creating a new style."""
    if family == 'paragraph' and 'marginbottom' not in kwargs:
        kwargs['marginbottom'] = '.5cm'
    style = Style(name=name, family=family)
    # Extract paragraph properties.
    kwargs_par = {}
    keys = sorted(kwargs.keys())
    for k in keys:
        if 'margin' in k:
            kwargs_par[k] = kwargs.pop(k)
    style.addElement(TextProperties(**kwargs))
    if kwargs_par:
        style.addElement(ParagraphProperties(**kwargs_par))
    return style
python
{ "resource": "" }
q2534
default_styles
train
def default_styles(): """Generate default ODF styles.""" styles = {} def _add_style(name, **kwargs): styles[name] = _create_style(name, **kwargs) _add_style('heading-1', family='paragraph', fontsize='24pt', fontweight='bold', ) _add_style('heading-2', family='paragraph', fontsize='22pt', fontweight='bold', ) _add_style('heading-3', family='paragraph', fontsize='20pt', fontweight='bold', ) _add_style('heading-4', family='paragraph', fontsize='18pt', fontweight='bold', ) _add_style('heading-5', family='paragraph', fontsize='16pt', fontweight='bold', ) _add_style('heading-6', family='paragraph', fontsize='14pt', fontweight='bold', ) _add_style('normal-paragraph', family='paragraph', fontsize='12pt', marginbottom='0.25cm', ) _add_style('code', family='paragraph', fontsize='10pt', fontweight='bold', fontfamily='Courier New', color='#555555', ) _add_style('quote', family='paragraph', fontsize='12pt', fontstyle='italic', ) _add_style('list-paragraph', family='paragraph', fontsize='12pt', marginbottom='.1cm', ) _add_style('sublist-paragraph', family='paragraph', fontsize='12pt', marginbottom='.1cm', ) _add_style('numbered-list-paragraph', family='paragraph', fontsize='12pt', marginbottom='.1cm', ) _add_style('normal-text', family='text', fontsize='12pt', ) _add_style('italic', family='text', fontstyle='italic', fontsize='12pt', ) _add_style('bold', family='text', fontweight='bold', fontsize='12pt', ) _add_style('url', family='text', fontsize='12pt', fontweight='bold', fontfamily='Courier', ) _add_style('inline-code', family='text', fontsize='10pt', fontweight='bold', fontfamily='Courier New', color='#555555', ) styles['_numbered_list'] = _numbered_style() return styles
python
{ "resource": "" }
q2535
load_styles
train
def load_styles(path_or_doc):
    """Return a dictionary of all styles contained in an ODF document."""
    if isinstance(path_or_doc, string_types):
        doc = load(path_or_doc)
    else:
        # Recover the OpenDocumentText instance.
        if isinstance(path_or_doc, ODFDocument):
            doc = path_or_doc._doc
        else:
            doc = path_or_doc
    assert isinstance(doc, OpenDocument), doc
    styles = {_style_name(style): style for style in doc.styles.childNodes}
    return styles
python
{ "resource": "" }
q2536
_item_type
train
def _item_type(item):
    """Indicate to the ODF reader the type of the block or text."""
    tag = item['tag']
    style = item.get('style', None)
    if tag == 'p':
        if style is None or 'paragraph' in style:
            return 'paragraph'
        else:
            return style
    elif tag == 'span':
        if style in (None, 'normal-text'):
            return 'text'
        elif style == 'url':
            return 'link'
        else:
            return style
    elif tag == 'h':
        assert style is not None
        return style
    elif tag in ('list', 'list-item', 'line-break'):
        if style == '_numbered_list':
            return 'numbered-list'
        else:
            return tag
    elif tag == 's':
        return 'spaces'
    raise Exception("The tag '{0}' with style '{1}' hasn't "
                    "been implemented.".format(tag, style))
python
{ "resource": "" }
q2537
ODFDocument.add_styles
train
def add_styles(self, **styles):
    """Add ODF styles to the current document."""
    for stylename in sorted(styles):
        self._doc.styles.addElement(styles[stylename])
python
{ "resource": "" }
q2538
ODFDocument._add_element
train
def _add_element(self, cls, **kwargs):
    """Add an element."""
    # Convert stylename strings to actual style elements.
    kwargs = self._replace_stylename(kwargs)
    el = cls(**kwargs)
    self._doc.text.addElement(el)
python
{ "resource": "" }
q2539
ODFDocument._style_name
train
def _style_name(self, el):
    """Return the style name of an element."""
    if el.attributes is None:
        return None
    style_field = ('urn:oasis:names:tc:opendocument:xmlns:text:1.0',
                   'style-name')
    name = el.attributes.get(style_field, None)
    if not name:
        return None
    return self._get_style_name(name)
python
{ "resource": "" }
q2540
ODFDocument.start_container
train
def start_container(self, cls, **kwargs):
    """Append a new container."""
    # Convert stylename strings to actual style elements.
    kwargs = self._replace_stylename(kwargs)
    # Create the container.
    container = cls(**kwargs)
    self._containers.append(container)
python
{ "resource": "" }
q2541
ODFDocument.end_container
train
def end_container(self, cancel=None):
    """Finishes and registers the currently-active container, unless
    'cancel' is True."""
    if not self._containers:
        return
    container = self._containers.pop()
    if len(self._containers) >= 1:
        parent = self._containers[-1]
    else:
        parent = self._doc.text
    if not cancel:
        parent.addElement(container)
python
{ "resource": "" }
q2542
ODFDocument.container
train
def container(self, cls, **kwargs):
    """Container context manager."""
    self.start_container(cls, **kwargs)
    yield
    self.end_container()
python
{ "resource": "" }
q2543
ODFDocument.start_paragraph
train
def start_paragraph(self, stylename=None):
    """Start a new paragraph."""
    # Use the next paragraph style if one was set.
    if stylename is None:
        stylename = self._next_p_style or 'normal-paragraph'
    self.start_container(P, stylename=stylename)
python
{ "resource": "" }
q2544
ODFDocument.require_paragraph
train
def require_paragraph(self):
    """Create a new paragraph unless the currently-active container
    is already a paragraph."""
    if self._containers and _is_paragraph(self._containers[-1]):
        return False
    else:
        self.start_paragraph()
        return True
python
{ "resource": "" }
q2545
ODFDocument._code_line
train
def _code_line(self, line):
    """Add a code line."""
    assert self._containers
    container = self._containers[-1]
    # Handle extra spaces.
    text = line
    while text:
        if text.startswith(' '):
            r = re.match(r'(^ +)', text)
            n = len(r.group(1))
            container.addElement(S(c=n))
            text = text[n:]
        elif ' ' in text:
            assert not text.startswith(' ')
            i = text.index(' ')
            container.addElement(Span(text=text[:i]))
            text = text[i:]
        else:
            container.addElement(Span(text=text))
            text = ''
python
{ "resource": "" }
q2546
ODFDocument.code
train
def code(self, text, lang=None):
    """Add a code block."""
    # WARNING: lang is discarded currently.
    with self.paragraph(stylename='code'):
        lines = text.splitlines()
        for line in lines[:-1]:
            self._code_line(line)
            self.linebreak()
        self._code_line(lines[-1])
python
{ "resource": "" }
q2547
ODFDocument.start_numbered_list
train
def start_numbered_list(self):
    """Start a numbered list."""
    self._ordered = True
    self.start_container(List, stylename='_numbered_list')
    self.set_next_paragraph_style('numbered-list-paragraph'
                                  if self._item_level <= 0
                                  else 'sublist-paragraph')
python
{ "resource": "" }
q2548
ODFDocument.start_list
train
def start_list(self):
    """Start a list."""
    self._ordered = False
    self.start_container(List)
    self.set_next_paragraph_style('list-paragraph'
                                  if self._item_level <= 0
                                  else 'sublist-paragraph')
python
{ "resource": "" }
q2549
ODFDocument.text
train
def text(self, text, stylename=None):
    """Add text within the current container."""
    assert self._containers
    container = self._containers[-1]
    if stylename is not None:
        stylename = self._get_style_name(stylename)
        container.addElement(Span(stylename=stylename, text=text))
    else:
        container.addElement(Span(text=text))
python
{ "resource": "" }
q2550
_cell_output
train
def _cell_output(cell):
    """Return the output of an ipynb cell."""
    outputs = cell.get('outputs', [])
    # Add stdout.
    stdout = ('\n'.join(_ensure_string(output.get('text', ''))
                        for output in outputs)).rstrip()
    # Add text output.
    text_outputs = []
    for output in outputs:
        out = output.get('data', {}).get('text/plain', [])
        out = _ensure_string(out)
        # HACK: skip <matplotlib ...> outputs.
        if out.startswith('<matplotlib'):
            continue
        text_outputs.append(out)
    return stdout + '\n'.join(text_outputs).rstrip()
python
{ "resource": "" }
q2551
FireTV._dump
train
def _dump(self, service, grep=None):
    """Perform a service dump.

    :param service: Service to dump.
    :param grep: Grep for this string.
    :returns: Dump, optionally grepped.
    """
    if grep:
        return self.adb_shell('dumpsys {0} | grep "{1}"'.format(service, grep))
    return self.adb_shell('dumpsys {0}'.format(service))
python
{ "resource": "" }
q2552
FireTV._dump_has
train
def _dump_has(self, service, grep, search):
    """Check if a dump has particular content.

    :param service: Service to dump.
    :param grep: Grep for this string.
    :param search: Check for this substring.
    :returns: Found or not.
    """
    dump_grep = self._dump(service, grep=grep)
    if not dump_grep:
        return False
    return dump_grep.strip().find(search) > -1
python
{ "resource": "" }
q2553
FireTV._ps
train
def _ps(self, search=''):
    """Perform a ps command with optional filtering.

    :param search: Check for this substring.
    :returns: List of matching fields
    """
    if not self.available:
        return
    result = []
    ps = self.adb_streaming_shell('ps')
    try:
        for bad_line in ps:
            # The splitting of the StreamingShell doesn't always work;
            # this is to ensure that we get only one line
            for line in bad_line.splitlines():
                if search in line:
                    result.append(line.strip().rsplit(' ', 1)[-1])
        return result
    except InvalidChecksumError as e:
        print(e)
        self.connect()
        raise IOError
python
{ "resource": "" }
q2554
FireTV.connect
train
def connect(self, always_log_errors=True): """Connect to an Amazon Fire TV device. Will attempt to establish ADB connection to the given host. Failure sets state to UNKNOWN and disables sending actions. :returns: True if successful, False otherwise """ self._adb_lock.acquire(**LOCK_KWARGS) try: if not self.adb_server_ip: # python-adb try: if self.adbkey: signer = Signer(self.adbkey) # Connect to the device self._adb = adb_commands.AdbCommands().ConnectDevice(serial=self.host, rsa_keys=[signer], default_timeout_ms=9000) else: self._adb = adb_commands.AdbCommands().ConnectDevice(serial=self.host, default_timeout_ms=9000) # ADB connection successfully established self._available = True except socket_error as serr: if self._available or always_log_errors: if serr.strerror is None: serr.strerror = "Timed out trying to connect to ADB device." logging.warning("Couldn't connect to host: %s, error: %s", self.host, serr.strerror) # ADB connection attempt failed self._adb = None self._available = False finally: return self._available else: # pure-python-adb try: self._adb_client = AdbClient(host=self.adb_server_ip, port=self.adb_server_port) self._adb_device = self._adb_client.device(self.host) self._available = bool(self._adb_device) except: self._available = False finally: return self._available finally: self._adb_lock.release()
python
{ "resource": "" }
q2555
FireTV.update
train
def update(self, get_running_apps=True): """Get the state of the device, the current app, and the running apps. :param get_running_apps: whether or not to get the ``running_apps`` property :return state: the state of the device :return current_app: the current app :return running_apps: the running apps """ # The `screen_on`, `awake`, `wake_lock_size`, `current_app`, and `running_apps` properties. screen_on, awake, wake_lock_size, _current_app, running_apps = self.get_properties(get_running_apps=get_running_apps, lazy=True) # Check if device is off. if not screen_on: state = STATE_OFF current_app = None running_apps = None # Check if screen saver is on. elif not awake: state = STATE_IDLE current_app = None running_apps = None else: # Get the current app. if isinstance(_current_app, dict) and 'package' in _current_app: current_app = _current_app['package'] else: current_app = None # Get the running apps. if running_apps is None and current_app: running_apps = [current_app] # Get the state. # TODO: determine the state differently based on the `current_app`. if current_app in [PACKAGE_LAUNCHER, PACKAGE_SETTINGS]: state = STATE_STANDBY # Amazon Video elif current_app == AMAZON_VIDEO: if wake_lock_size == 5: state = STATE_PLAYING else: # wake_lock_size == 2 state = STATE_PAUSED # Netflix elif current_app == NETFLIX: if wake_lock_size > 3: state = STATE_PLAYING else: state = STATE_PAUSED # Check if `wake_lock_size` is 1 (device is playing). elif wake_lock_size == 1: state = STATE_PLAYING # Otherwise, device is paused. else: state = STATE_PAUSED return state, current_app, running_apps
python
{ "resource": "" }
q2556
FireTV.app_state
train
def app_state(self, app):
    """Informs if application is running."""
    if not self.available or not self.screen_on:
        return STATE_OFF
    if self.current_app["package"] == app:
        return STATE_ON
    return STATE_OFF
python
{ "resource": "" }
q2557
FireTV.state
train
def state(self):
    """Compute and return the device state.

    :returns: Device state.
    """
    # Check if device is disconnected.
    if not self.available:
        return STATE_UNKNOWN
    # Check if device is off.
    if not self.screen_on:
        return STATE_OFF
    # Check if screen saver is on.
    if not self.awake:
        return STATE_IDLE
    # Check if the launcher is active.
    if self.launcher or self.settings:
        return STATE_STANDBY
    # Check for a wake lock (device is playing).
    if self.wake_lock:
        return STATE_PLAYING
    # Otherwise, device is paused.
    return STATE_PAUSED
python
{ "resource": "" }
q2558
FireTV.available
train
def available(self): """Check whether the ADB connection is intact.""" if not self.adb_server_ip: # python-adb return bool(self._adb) # pure-python-adb try: # make sure the server is available adb_devices = self._adb_client.devices() # make sure the device is available try: # case 1: the device is currently available if any([self.host in dev.get_serial_no() for dev in adb_devices]): if not self._available: self._available = True return True # case 2: the device is not currently available if self._available: logging.error('ADB server is not connected to the device.') self._available = False return False except RuntimeError: if self._available: logging.error('ADB device is unavailable; encountered an error when searching for device.') self._available = False return False except RuntimeError: if self._available: logging.error('ADB server is unavailable.') self._available = False return False
python
{ "resource": "" }
q2559
FireTV.running_apps
train
def running_apps(self):
    """Return a list of running user applications."""
    ps = self.adb_shell(RUNNING_APPS_CMD)
    if ps:
        return [line.strip().rsplit(' ', 1)[-1]
                for line in ps.splitlines() if line.strip()]
    return []
python
{ "resource": "" }
q2560
FireTV.current_app
train
def current_app(self):
    """Return the current app."""
    current_focus = self.adb_shell(CURRENT_APP_CMD)
    if current_focus is None:
        return None

    current_focus = current_focus.replace("\r", "")
    matches = WINDOW_REGEX.search(current_focus)

    # case 1: current app was successfully found
    if matches:
        (pkg, activity) = matches.group("package", "activity")
        return {"package": pkg, "activity": activity}

    # case 2: current app could not be found
    logging.warning("Couldn't get current app, reply was %s", current_focus)
    return None
python
{ "resource": "" }
q2561
FireTV.wake_lock_size
train
def wake_lock_size(self):
    """Get the size of the current wake lock."""
    output = self.adb_shell(WAKE_LOCK_SIZE_CMD)
    if not output:
        return None
    return int(output.split("=")[1].strip())
python
{ "resource": "" }
q2562
FireTV.get_properties
train
def get_properties(self, get_running_apps=True, lazy=False): """Get the ``screen_on``, ``awake``, ``wake_lock_size``, ``current_app``, and ``running_apps`` properties.""" if get_running_apps: output = self.adb_shell(SCREEN_ON_CMD + (SUCCESS1 if lazy else SUCCESS1_FAILURE0) + " && " + AWAKE_CMD + (SUCCESS1 if lazy else SUCCESS1_FAILURE0) + " && " + WAKE_LOCK_SIZE_CMD + " && " + CURRENT_APP_CMD + " && " + RUNNING_APPS_CMD) else: output = self.adb_shell(SCREEN_ON_CMD + (SUCCESS1 if lazy else SUCCESS1_FAILURE0) + " && " + AWAKE_CMD + (SUCCESS1 if lazy else SUCCESS1_FAILURE0) + " && " + WAKE_LOCK_SIZE_CMD + " && " + CURRENT_APP_CMD) # ADB command was unsuccessful if output is None: return None, None, None, None, None # `screen_on` property if not output: return False, False, -1, None, None screen_on = output[0] == '1' # `awake` property if len(output) < 2: return screen_on, False, -1, None, None awake = output[1] == '1' lines = output.strip().splitlines() # `wake_lock_size` property if len(lines[0]) < 3: return screen_on, awake, -1, None, None wake_lock_size = int(lines[0].split("=")[1].strip()) # `current_app` property if len(lines) < 2: return screen_on, awake, wake_lock_size, None, None matches = WINDOW_REGEX.search(lines[1]) if matches: # case 1: current app was successfully found (pkg, activity) = matches.group("package", "activity") current_app = {"package": pkg, "activity": activity} else: # case 2: current app could not be found current_app = None # `running_apps` property if not get_running_apps or len(lines) < 3: return screen_on, awake, wake_lock_size, current_app, None running_apps = [line.strip().rsplit(' ', 1)[-1] for line in lines[2:] if line.strip()] return screen_on, awake, wake_lock_size, current_app, running_apps
python
{ "resource": "" }
q2563
is_valid_device_id
train
def is_valid_device_id(device_id):
    """Check if a device identifier is valid.

    A valid device identifier contains only ascii word characters or dashes.

    :param device_id: Device identifier
    :returns: Valid or not.
    """
    valid = valid_device_id.match(device_id)
    if not valid:
        logging.error("A valid device identifier contains "
                      "only ascii word characters or dashes. "
                      "Device '%s' not added.", device_id)
    return valid
python
{ "resource": "" }
q2564
add
train
def add(device_id, host, adbkey='', adb_server_ip='', adb_server_port=5037):
    """Add a device.

    Creates a FireTV instance associated with the device identifier.

    :param device_id: Device identifier.
    :param host: Host in <address>:<port> format.
    :param adbkey: The path to the "adbkey" file
    :param adb_server_ip: the IP address for the ADB server
    :param adb_server_port: the port for the ADB server
    :returns: Added successfully or not.
    """
    valid = is_valid_device_id(device_id) and is_valid_host(host)
    if valid:
        devices[device_id] = FireTV(str(host), str(adbkey),
                                    str(adb_server_ip), str(adb_server_port))
    return valid
python
{ "resource": "" }
q2565
add_device
train
def add_device():
    """Add a device via HTTP POST.

    POST JSON in the following format::

        {
            "device_id": "<your_device_id>",
            "host": "<address>:<port>",
            "adbkey": "<path to the adbkey file>"
        }
    """
    req = request.get_json()
    success = False
    if 'device_id' in req and 'host' in req:
        success = add(req['device_id'], req['host'],
                      req.get('adbkey', ''),
                      req.get('adb_server_ip', ''),
                      req.get('adb_server_port', 5037))
    return jsonify(success=success)
python
{ "resource": "" }
q2566
list_devices
train
def list_devices():
    """List devices via HTTP GET."""
    output = {}
    for device_id, device in devices.items():
        output[device_id] = {
            'host': device.host,
            'state': device.state
        }
    return jsonify(devices=output)
python
{ "resource": "" }
q2567
device_state
train
def device_state(device_id):
    """Get device state via HTTP GET."""
    if device_id not in devices:
        return jsonify(success=False)
    return jsonify(state=devices[device_id].state)
python
{ "resource": "" }
q2568
current_app
train
def current_app(device_id):
    """Get currently running app via HTTP GET."""
    if not is_valid_device_id(device_id):
        abort(403)
    if device_id not in devices:
        abort(404)
    current = devices[device_id].current_app
    if current is None:
        abort(404)
    return jsonify(current_app=current)
python
{ "resource": "" }
q2569
running_apps
train
def running_apps(device_id):
    """Get running apps via HTTP GET."""
    if not is_valid_device_id(device_id):
        abort(403)
    if device_id not in devices:
        abort(404)
    return jsonify(running_apps=devices[device_id].running_apps)
python
{ "resource": "" }
q2570
get_app_state
train
def get_app_state(device_id, app_id):
    """Get the state of the requested app."""
    if not is_valid_app_id(app_id):
        abort(403)
    if not is_valid_device_id(device_id):
        abort(403)
    if device_id not in devices:
        abort(404)
    app_state = devices[device_id].app_state(app_id)
    return jsonify(state=app_state, status=app_state)
python
{ "resource": "" }
q2571
device_action
train
def device_action(device_id, action_id):
    """Initiate device action via HTTP GET."""
    success = False
    if device_id in devices:
        input_cmd = getattr(devices[device_id], action_id, None)
        if callable(input_cmd):
            input_cmd()
            success = True
    return jsonify(success=success)
python
{ "resource": "" }
q2572
app_start
train
def app_start(device_id, app_id):
    """Start an app with the corresponding package name."""
    if not is_valid_app_id(app_id):
        abort(403)
    if not is_valid_device_id(device_id):
        abort(403)
    if device_id not in devices:
        abort(404)
    success = devices[device_id].launch_app(app_id)
    return jsonify(success=success)
python
{ "resource": "" }
q2573
app_stop
train
def app_stop(device_id, app_id):
    """Stop an app with the corresponding package name."""
    if not is_valid_app_id(app_id):
        abort(403)
    if not is_valid_device_id(device_id):
        abort(403)
    if device_id not in devices:
        abort(404)
    success = devices[device_id].stop_app(app_id)
    return jsonify(success=success)
python
{ "resource": "" }
q2574
device_connect
train
def device_connect(device_id):
    """Force a connection attempt via HTTP GET."""
    success = False
    if device_id in devices:
        devices[device_id].connect()
        success = True
    return jsonify(success=success)
python
{ "resource": "" }
q2575
_parse_config
train
def _parse_config(config_file_path):
    """Parse the config file (YAML)."""
    # safe_load avoids constructing arbitrary objects from untrusted YAML.
    with open(config_file_path, 'r') as config_file:
        config = yaml.safe_load(config_file)
    return config
python
{ "resource": "" }
q2576
_add_devices_from_config
train
def _add_devices_from_config(args):
    """Add devices from config."""
    config = _parse_config(args.config)
    for device in config['devices']:
        if args.default:
            if device == "default":
                raise ValueError('devicename "default" in config is not '
                                 'allowed if default param is set')
            if config['devices'][device]['host'] == args.default:
                raise ValueError('host set in default param must not be '
                                 'defined in config')
        add(device, config['devices'][device]['host'],
            config['devices'][device].get('adbkey', ''),
            config['devices'][device].get('adb_server_ip', ''),
            config['devices'][device].get('adb_server_port', 5037))
python
{ "resource": "" }
q2577
main
train
def main():
    """Set up the server."""
    parser = argparse.ArgumentParser(description='AFTV Server')
    parser.add_argument('-p', '--port', type=int, help='listen port',
                        default=5556)
    parser.add_argument('-d', '--default', help='default Amazon Fire TV host',
                        nargs='?')
    parser.add_argument('-c', '--config', type=str, help='Path to config file')
    args = parser.parse_args()

    if args.config:
        _add_devices_from_config(args)

    if args.default and not add('default', args.default):
        exit('invalid hostname')

    app.run(host='0.0.0.0', port=args.port)
python
{ "resource": "" }
q2578
ProfanityFilter._load_words
train
def _load_words(self):
    """Loads the list of profane words from file."""
    with open(self._words_file, 'r') as f:
        self._censor_list = [line.strip() for line in f.readlines()]
python
{ "resource": "" }
q2579
ProfanityFilter.get_profane_words
train
def get_profane_words(self):
    """Returns all profane words currently in use."""
    profane_words = []

    if self._custom_censor_list:
        # Previous versions of Python don't have list.copy()
        profane_words = [w for w in self._custom_censor_list]
    else:
        profane_words = [w for w in self._censor_list]

    profane_words.extend(self._extra_censor_list)
    profane_words.extend([inflection.pluralize(word) for word in profane_words])
    profane_words = list(set(profane_words))

    # We sort the list based on decreasing word length so that words like
    # 'fu' aren't substituted before 'fuck' if no_word_boundaries = true
    profane_words.sort(key=len)
    profane_words.reverse()

    return profane_words
python
{ "resource": "" }
q2580
ProfanityFilter.censor
train
def censor(self, input_text):
    """Returns input_text with any profane words censored."""
    bad_words = self.get_profane_words()
    res = input_text

    for word in bad_words:
        # Apply word boundaries to the bad word
        regex_string = r'{0}' if self._no_word_boundaries else r'\b{0}\b'
        regex_string = regex_string.format(word)
        regex = re.compile(regex_string, re.IGNORECASE)
        res = regex.sub(self._censor_char * len(word), res)

    return res
python
{ "resource": "" }
q2581
SMB2NetworkInterfaceInfo.unpack_multiple
train
def unpack_multiple(data):
    """Gets a list of SMB2NetworkInterfaceInfo messages from the byte
    value passed in. This is the raw buffer value that is set on the
    SMB2IOCTLResponse message.

    :param data: bytes of the messages
    :return: List of SMB2NetworkInterfaceInfo messages
    """
    chunks = []
    while data:
        info = SMB2NetworkInterfaceInfo()
        data = info.unpack(data)
        chunks.append(info)
    return chunks
python
{ "resource": "" }
q2582
CreateContextName.get_response_structure
train
def get_response_structure(name):
    """Returns the response structure for a known list of create context
    responses.

    :param name: The constant value above
    :return: The response structure or None if unknown
    """
    return {
        CreateContextName.SMB2_CREATE_DURABLE_HANDLE_REQUEST:
            SMB2CreateDurableHandleResponse(),
        CreateContextName.SMB2_CREATE_DURABLE_HANDLE_RECONNECT:
            SMB2CreateDurableHandleReconnect(),
        CreateContextName.SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST:
            SMB2CreateQueryMaximalAccessResponse(),
        CreateContextName.SMB2_CREATE_REQUEST_LEASE:
            SMB2CreateResponseLease(),
        CreateContextName.SMB2_CREATE_QUERY_ON_DISK_ID:
            SMB2CreateQueryOnDiskIDResponse(),
        CreateContextName.SMB2_CREATE_REQUEST_LEASE_V2:
            SMB2CreateResponseLeaseV2(),
        CreateContextName.SMB2_CREATE_DURABLE_HANDLE_REQUEST_V2:
            SMB2CreateDurableHandleResponseV2(),
        CreateContextName.SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2:
            SMB2CreateDurableHandleReconnectV2(),
        CreateContextName.SMB2_CREATE_APP_INSTANCE_ID:
            SMB2CreateAppInstanceId(),
        CreateContextName.SMB2_CREATE_APP_INSTANCE_VERSION:
            SMB2CreateAppInstanceVersion()
    }.get(name, None)
python
{ "resource": "" }
q2583
SMB2CreateContextRequest.get_context_data
train
def get_context_data(self):
    """Get the buffer_data value of a context response and try to
    convert it to the relevant structure based on the buffer_name
    used. If it is an unknown structure then the raw bytes are
    returned.

    :return: relevant Structure of buffer_data or bytes if unknown name
    """
    buffer_name = self['buffer_name'].get_value()
    structure = CreateContextName.get_response_structure(buffer_name)
    if structure:
        structure.unpack(self['buffer_data'].get_value())
        return structure
    else:
        # unknown structure, just return the raw bytes
        return self['buffer_data'].get_value()
python
{ "resource": "" }
q2584
SMB2CreateEABuffer.pack_multiple
train
def pack_multiple(messages):
    """Converts a list of SMB2CreateEABuffer structures and packs them
    as a bytes object used when setting to the SMB2CreateContextRequest
    buffer_data field.

    This should be used as it would calculate the correct
    next_entry_offset field value for each buffer entry.

    :param messages: List of SMB2CreateEABuffer structures
    :return: bytes object that is set on the SMB2CreateContextRequest
        buffer_data field.
    """
    data = b""
    msg_count = len(messages)
    for i, msg in enumerate(messages):
        if i == msg_count - 1:
            msg['next_entry_offset'] = 0
        else:
            # because the end padding val won't be populated if the entry
            # offset is 0, we set to 1 so the len calc is correct
            msg['next_entry_offset'] = 1
            msg['next_entry_offset'] = len(msg)
        data += msg.pack()
    return data
python
{ "resource": "" }
q2585
TreeConnect.connect
train
def connect(self, require_secure_negotiate=True): """ Connect to the share. :param require_secure_negotiate: For Dialects 3.0 and 3.0.2, will verify the negotiation parameters with the server to prevent SMB downgrade attacks """ log.info("Session: %s - Creating connection to share %s" % (self.session.username, self.share_name)) utf_share_name = self.share_name.encode('utf-16-le') connect = SMB2TreeConnectRequest() connect['buffer'] = utf_share_name log.info("Session: %s - Sending Tree Connect message" % self.session.username) log.debug(str(connect)) request = self.session.connection.send(connect, sid=self.session.session_id) log.info("Session: %s - Receiving Tree Connect response" % self.session.username) response = self.session.connection.receive(request) tree_response = SMB2TreeConnectResponse() tree_response.unpack(response['data'].get_value()) log.debug(str(tree_response)) # https://msdn.microsoft.com/en-us/library/cc246687.aspx self.tree_connect_id = response['tree_id'].get_value() log.info("Session: %s - Created tree connection with ID %d" % (self.session.username, self.tree_connect_id)) self._connected = True self.session.tree_connect_table[self.tree_connect_id] = self capabilities = tree_response['capabilities'] self.is_dfs_share = capabilities.has_flag( ShareCapabilities.SMB2_SHARE_CAP_DFS) self.is_ca_share = capabilities.has_flag( ShareCapabilities.SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY) dialect = self.session.connection.dialect if dialect >= Dialects.SMB_3_0_0 and \ self.session.connection.supports_encryption: self.encrypt_data = tree_response['share_flags'].has_flag( ShareFlags.SMB2_SHAREFLAG_ENCRYPT_DATA) self.is_scaleout_share = capabilities.has_flag( ShareCapabilities.SMB2_SHARE_CAP_SCALEOUT) # secure negotiate is only valid for SMB 3 dialects before 3.1.1 if dialect < Dialects.SMB_3_1_1 and require_secure_negotiate: self._verify_dialect_negotiate()
python
{ "resource": "" }
q2586
TreeConnect.disconnect
train
def disconnect(self):
    """Disconnects the tree connection."""
    if not self._connected:
        return

    log.info("Session: %s, Tree: %s - Disconnecting from Tree Connect"
             % (self.session.username, self.share_name))
    req = SMB2TreeDisconnect()
    log.info("Session: %s, Tree: %s - Sending Tree Disconnect message"
             % (self.session.username, self.share_name))
    log.debug(str(req))
    request = self.session.connection.send(req,
                                           sid=self.session.session_id,
                                           tid=self.tree_connect_id)

    log.info("Session: %s, Tree: %s - Receiving Tree Disconnect response"
             % (self.session.username, self.share_name))
    res = self.session.connection.receive(request)
    res_disconnect = SMB2TreeDisconnect()
    res_disconnect.unpack(res['data'].get_value())
    log.debug(str(res_disconnect))
    self._connected = False
    del self.session.tree_connect_table[self.tree_connect_id]
python
{ "resource": "" }
q2587
Connection.disconnect
train
def disconnect(self, close=True):
    """Close the connection: optionally log off and disconnect all
    sessions, then disconnect the TCP connection and shut down the
    socket listener running in a thread.

    :param close: Will close all sessions in the connection as well as
        the tree connections of each session.
    """
    if close:
        for session in list(self.session_table.values()):
            session.disconnect(True)

    log.info("Disconnecting transport connection")
    self.transport.disconnect()
python
{ "resource": "" }
q2588
Connection.send
train
def send(self, message, sid=None, tid=None, credit_request=None): """ Will send a message to the server that is passed in. The final unencrypted header is returned to the function that called this. :param message: An SMB message structure to send :param sid: A session_id that the message is sent for :param tid: A tree_id object that the message is sent for :param credit_request: Specifies extra credits to be requested with the SMB header :return: Request of the message that was sent """ header = self._generate_packet_header(message, sid, tid, credit_request) # get the actual Session and TreeConnect object instead of the IDs session = self.session_table.get(sid, None) if sid else None tree = None if tid and session: if tid not in session.tree_connect_table.keys(): error_msg = "Cannot find Tree with the ID %d in the session " \ "tree table" % tid raise smbprotocol.exceptions.SMBException(error_msg) tree = session.tree_connect_table[tid] if session and session.signing_required and session.signing_key: self._sign(header, session) request = Request(header) self.outstanding_requests[header['message_id'].get_value()] = request send_data = header.pack() if (session and session.encrypt_data) or (tree and tree.encrypt_data): send_data = self._encrypt(send_data, session) self.transport.send(send_data) return request
python
{ "resource": "" }
q2589
Connection.send_compound
train
def send_compound(self, messages, sid, tid, related=False): """ Sends multiple messages within 1 TCP request, will fail if the size of the total length exceeds the maximum of the transport max. :param messages: A list of messages to send to the server :param sid: The session_id that the request is sent for :param tid: A tree_id object that the message is sent for :return: List<Request> for each request that was sent, each entry in the list is in the same order of the message list that was passed in """ send_data = b"" session = self.session_table[sid] tree = session.tree_connect_table[tid] requests = [] total_requests = len(messages) for i, message in enumerate(messages): if i == total_requests - 1: next_command = 0 padding = b"" else: msg_length = 64 + len(message) # each compound message must start at the 8-byte boundary mod = msg_length % 8 padding_length = 8 - mod if mod > 0 else 0 padding = b"\x00" * padding_length next_command = msg_length + padding_length header = self._generate_packet_header(message, sid, tid, None) header['next_command'] = next_command if i != 0 and related: header['session_id'] = b"\xff" * 8 header['tree_id'] = b"\xff" * 4 header['flags'].set_flag( Smb2Flags.SMB2_FLAGS_RELATED_OPERATIONS ) if session.signing_required and session.signing_key: self._sign(header, session, padding=padding) send_data += header.pack() + padding request = Request(header) requests.append(request) self.outstanding_requests[header['message_id'].get_value()] = \ request if session.encrypt_data or tree.encrypt_data: send_data = self._encrypt(send_data, session) self.transport.send(send_data) return requests
python
{ "resource": "" }
q2590
Connection.receive
train
def receive(self, request, wait=True, timeout=None): """ Polls the message buffer of the TCP connection and waits until a valid message is received based on the message_id passed in. :param request: The Request object to wait get the response for :param wait: Wait for the final response in the case of a STATUS_PENDING response, the pending response is returned in the case of wait=False :param timeout: Set a timeout used while waiting for a response from the server :return: SMB2HeaderResponse of the received message """ start_time = time.time() # check if we have received a response while True: self._flush_message_buffer() status = request.response['status'].get_value() if \ request.response else None if status is not None and (wait and status != NtStatus.STATUS_PENDING): break current_time = time.time() - start_time if timeout and (current_time > timeout): error_msg = "Connection timeout of %d seconds exceeded while" \ " waiting for a response from the server" \ % timeout raise smbprotocol.exceptions.SMBException(error_msg) response = request.response status = response['status'].get_value() if status not in [NtStatus.STATUS_SUCCESS, NtStatus.STATUS_PENDING]: raise smbprotocol.exceptions.SMBResponseException(response, status) # now we have a retrieval request for the response, we can delete # the request from the outstanding requests message_id = request.message['message_id'].get_value() del self.outstanding_requests[message_id] return response
python
{ "resource": "" }
q2591
Connection.echo
train
def echo(self, sid=0, timeout=60, credit_request=1): """ Sends an SMB2 Echo request to the server. This can be used to request more credits from the server with the credit_request param. On a Samba server, the sid can be 0 but for a Windows SMB Server, the sid of an authenticated session must be passed into this function or else the socket will close. :param sid: When talking to a Windows host this must be populated with a valid session_id from a negotiated session :param timeout: The timeout in seconds to wait for the Echo Response :param credit_request: The number of credits to request :return: the credits that were granted by the server """ log.info("Sending Echo request with a timeout of %d and credit " "request of %d" % (timeout, credit_request)) echo_msg = SMB2Echo() log.debug(str(echo_msg)) req = self.send(echo_msg, sid=sid, credit_request=credit_request) log.info("Receiving Echo response") response = self.receive(req, timeout=timeout) log.info("Credits granted from the server echo response: %d" % response['credit_response'].get_value()) echo_resp = SMB2Echo() echo_resp.unpack(response['data'].get_value()) log.debug(str(echo_resp)) return response['credit_response'].get_value()
python
{ "resource": "" }
q2592
Connection._flush_message_buffer
train
def _flush_message_buffer(self): """ Loops through the transport message_buffer until there are no messages left in the queue. Each response is assigned to the Request object based on the message_id which are then available in self.outstanding_requests """ while True: message_bytes = self.transport.receive() # there were no messages receives, so break from the loop if message_bytes is None: break # check if the message is encrypted and decrypt if necessary if message_bytes[:4] == b"\xfdSMB": message = SMB2TransformHeader() message.unpack(message_bytes) message_bytes = self._decrypt(message) # now retrieve message(s) from response is_last = False session_id = None while not is_last: next_command = struct.unpack("<L", message_bytes[20:24])[0] header_length = \ next_command if next_command != 0 else len(message_bytes) header_bytes = message_bytes[:header_length] message = SMB2HeaderResponse() message.unpack(header_bytes) flags = message['flags'] if not flags.has_flag(Smb2Flags.SMB2_FLAGS_RELATED_OPERATIONS): session_id = message['session_id'].get_value() self._verify(message, session_id) message_id = message['message_id'].get_value() request = self.outstanding_requests.get(message_id, None) if not request: error_msg = "Received response with an unknown message " \ "ID: %d" % message_id raise smbprotocol.exceptions.SMBException(error_msg) # add the upper credit limit based on the credits granted by # the server credit_response = message['credit_response'].get_value() self.sequence_window['high'] += \ credit_response if credit_response > 0 else 1 request.response = message self.outstanding_requests[message_id] = request message_bytes = message_bytes[header_length:] is_last = next_command == 0
python
{ "resource": "" }
q2593
Connection._calculate_credit_charge
train
def _calculate_credit_charge(self, message): """ Calculates the credit charge for a request based on the command. If connection.supports_multi_credit is not True then the credit charge isn't valid so it returns 0. The credit charge is the number of credits that are required for sending/receiving data over 64 kilobytes, in the existing messages only the Read, Write, Query Directory or IOCTL commands will end in this scenario and each require their own calculation to get the proper value. The generic formula for calculating the credit charge is https://msdn.microsoft.com/en-us/library/dn529312.aspx (max(SendPayloadSize, Expected ResponsePayloadSize) - 1) / 65536 + 1 :param message: The message being sent :return: The credit charge to set on the header """ credit_size = 65536 if not self.supports_multi_credit: credit_charge = 0 elif message.COMMAND == Commands.SMB2_READ: max_size = message['length'].get_value() + \ message['read_channel_info_length'].get_value() - 1 credit_charge = math.ceil(max_size / credit_size) elif message.COMMAND == Commands.SMB2_WRITE: max_size = message['length'].get_value() + \ message['write_channel_info_length'].get_value() - 1 credit_charge = math.ceil(max_size / credit_size) elif message.COMMAND == Commands.SMB2_IOCTL: max_in_size = len(message['buffer']) max_out_size = message['max_output_response'].get_value() max_size = max(max_in_size, max_out_size) - 1 credit_charge = math.ceil(max_size / credit_size) elif message.COMMAND == Commands.SMB2_QUERY_DIRECTORY: max_in_size = len(message['buffer']) max_out_size = message['output_buffer_length'].get_value() max_size = max(max_in_size, max_out_size) - 1 credit_charge = math.ceil(max_size / credit_size) else: credit_charge = 1 # python 2 returns a float where we need an integer return int(credit_charge)
python
{ "resource": "" }
q2594
Session.disconnect
train
def disconnect(self, close=True):
    """Logs off the session.

    :param close: Will close all tree connects in a session
    """
    if not self._connected:
        # already disconnected so let's return
        return

    if close:
        for open in list(self.open_table.values()):
            open.close(False)

        for tree in list(self.tree_connect_table.values()):
            tree.disconnect()

    log.info("Session: %s - Logging off of SMB Session" % self.username)
    logoff = SMB2Logoff()
    log.info("Session: %s - Sending Logoff message" % self.username)
    log.debug(str(logoff))
    request = self.connection.send(logoff, sid=self.session_id)

    log.info("Session: %s - Receiving Logoff response" % self.username)
    res = self.connection.receive(request)
    res_logoff = SMB2Logoff()
    res_logoff.unpack(res['data'].get_value())
    log.debug(str(res_logoff))
    self._connected = False
    del self.connection.session_table[self.session_id]
python
{ "resource": "" }
q2595
Field.pack
train
def pack(self):
    """Packs the field value into a byte string so it can be sent to
    the server.

    :param structure: The message structure class object
    :return: A byte string of the packed field's value
    """
    value = self._get_calculated_value(self.value)
    packed_value = self._pack_value(value)
    size = self._get_calculated_size(self.size, packed_value)
    if len(packed_value) != size:
        raise ValueError("Invalid packed data length for field %s of %d "
                         "does not fit field size of %d"
                         % (self.name, len(packed_value), size))
    return packed_value
python
{ "resource": "" }
q2596
Field.set_value
train
def set_value(self, value):
    """Parses and sets the value attribute for the field.

    :param value: The value to be parsed and set, the allowed input types
        vary depending on the Field used
    """
    parsed_value = self._parse_value(value)
    self.value = parsed_value
python
{ "resource": "" }
q2597
Field.unpack
train
def unpack(self, data):
    """Takes in a byte string and sets the field value based on the field
    definition.

    :param structure: The message structure class object
    :param data: The byte string of the data to unpack
    :return: The remaining data for subsequent fields
    """
    size = self._get_calculated_size(self.size, data)
    self.set_value(data[0:size])
    return data[len(self):]
python
{ "resource": "" }
q2598
Field._get_calculated_value
train
def _get_calculated_value(self, value):
    """Gets the final value of the field and runs the lambda functions
    recursively until a final value is derived.

    :param value: The value to calculate/expand
    :return: The final value
    """
    if isinstance(value, types.LambdaType):
        expanded_value = value(self.structure)
        return self._get_calculated_value(expanded_value)
    else:
        # perform one final parsing of the value in case lambda value
        # returned a different type
        return self._parse_value(value)
python
{ "resource": "" }
q2599
Field._get_struct_format
train
def _get_struct_format(self, size):
    """Gets the format specifier for use in struct. This is only designed
    for 1, 2, 4, or 8 byte values and will throw an exception if it is
    anything else.

    :param size: The size as an int
    :return: The struct format specifier for the size specified
    """
    if isinstance(size, types.LambdaType):
        size = size(self.structure)

    struct_format = {
        1: 'B',
        2: 'H',
        4: 'L',
        8: 'Q'
    }
    if size not in struct_format.keys():
        raise InvalidFieldDefinition("Cannot struct format of size %s"
                                     % size)
    return struct_format[size]
python
{ "resource": "" }